Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- CCEdit-main/src/controlnet11/annotator/pidinet/LICENSE +21 -0
- CCEdit-main/src/controlnet11/annotator/pidinet/__init__.py +36 -0
- CCEdit-main/src/controlnet11/annotator/pidinet/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/src/controlnet11/annotator/pidinet/__pycache__/model.cpython-39.pyc +0 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/__init__.py +19 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/ade.py +84 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/builder.py +169 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/chase_db1.py +27 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/cityscapes.py +217 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/custom.py +400 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/dataset_wrappers.py +50 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/drive.py +27 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/hrf.py +27 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pascal_context.py +103 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/__init__.py +16 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/compose.py +51 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/formating.py +288 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/loading.py +153 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/test_time_aug.py +133 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/transforms.py +889 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/stare.py +27 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/voc.py +29 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/models/necks/__init__.py +4 -0
- CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/utils/collect_env.py +17 -0
- CCEdit-main/src/controlnet11/cldm/cldm.py +438 -0
- CCEdit-main/src/controlnet11/cldm/ddim_hacked.py +317 -0
- CCEdit-main/src/controlnet11/cldm/hack.py +111 -0
- CCEdit-main/src/controlnet11/cldm/logger.py +76 -0
- CCEdit-main/src/controlnet11/cldm/model.py +28 -0
- CCEdit-main/src/controlnet11/github_docs/annotator.md +95 -0
- CCEdit-main/src/controlnet11/ldm/data/__init__.py +0 -0
- CCEdit-main/src/controlnet11/ldm/data/util.py +24 -0
- CCEdit-main/src/controlnet11/ldm/models/autoencoder.py +219 -0
- CCEdit-main/src/controlnet11/ldm/models/diffusion/__init__.py +0 -0
- CCEdit-main/src/controlnet11/ldm/models/diffusion/ddim.py +336 -0
- CCEdit-main/src/controlnet11/ldm/models/diffusion/ddpm.py +1797 -0
- CCEdit-main/src/controlnet11/ldm/models/diffusion/dpm_solver/__init__.py +1 -0
- CCEdit-main/src/controlnet11/ldm/models/diffusion/dpm_solver/dpm_solver.py +1154 -0
- CCEdit-main/src/controlnet11/ldm/models/diffusion/dpm_solver/sampler.py +87 -0
- CCEdit-main/src/controlnet11/ldm/models/diffusion/plms.py +244 -0
- CCEdit-main/src/controlnet11/ldm/models/diffusion/sampling_util.py +22 -0
- CCEdit-main/src/controlnet11/ldm/modules/attention.py +341 -0
- CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/__init__.py +0 -0
- CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/model.py +852 -0
- CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/openaimodel.py +786 -0
- CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/upscaling.py +81 -0
- CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/util.py +270 -0
- CCEdit-main/src/controlnet11/ldm/modules/distributions/__init__.py +0 -0
- CCEdit-main/src/controlnet11/ldm/modules/distributions/distributions.py +92 -0
- CCEdit-main/src/controlnet11/ldm/modules/ema.py +80 -0
CCEdit-main/src/controlnet11/annotator/pidinet/LICENSE
ADDED
@@ -0,0 +1,21 @@
It is just for research purpose, and commercial use should be contacted with authors first.

Copyright (c) 2021 Zhuo Su

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
CCEdit-main/src/controlnet11/annotator/pidinet/__init__.py
ADDED
@@ -0,0 +1,36 @@
# Pidinet
# https://github.com/hellozhuo/pidinet

import os
import torch
import numpy as np
from einops import rearrange
from annotator.pidinet.model import pidinet
from annotator.util import annotator_ckpts_path, safe_step


class PidiNetDetector:
    def __init__(self):
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/table5_pidinet.pth"
        modelpath = os.path.join(annotator_ckpts_path, "table5_pidinet.pth")
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        self.netNetwork = pidinet()
        self.netNetwork.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(modelpath)['state_dict'].items()})
        self.netNetwork = self.netNetwork.cuda()
        self.netNetwork.eval()

    def __call__(self, input_image, safe=False):
        assert input_image.ndim == 3
        input_image = input_image[:, :, ::-1].copy()
        with torch.no_grad():
            image_pidi = torch.from_numpy(input_image).float().cuda()
            image_pidi = image_pidi / 255.0
            image_pidi = rearrange(image_pidi, 'h w c -> 1 c h w')
            edge = self.netNetwork(image_pidi)[-1]
            edge = edge.cpu().numpy()
            if safe:
                edge = safe_step(edge)
            edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
        return edge[0][0]
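A minimal usage sketch of the detector above (illustrative, not part of the upload: the filenames are hypothetical, a CUDA device is assumed since the model is moved to .cuda(), and the input is assumed to be RGB per ControlNet's convention; __call__ reverses the channel order itself):

import cv2
from annotator.pidinet import PidiNetDetector

apply_pidinet = PidiNetDetector()  # downloads table5_pidinet.pth into annotator_ckpts_path on first use
img = cv2.cvtColor(cv2.imread('input.png'), cv2.COLOR_BGR2RGB)  # HWC uint8 RGB (path hypothetical)
edge = apply_pidinet(img, safe=True)  # HxW uint8 edge map in [0, 255], safe_step applied
cv2.imwrite('edge.png', edge)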
CCEdit-main/src/controlnet11/annotator/pidinet/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (1.84 kB)
CCEdit-main/src/controlnet11/annotator/pidinet/__pycache__/model.cpython-39.pyc
ADDED
Binary file (13.8 kB)
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/__init__.py
ADDED
@@ -0,0 +1,19 @@
from .ade import ADE20KDataset
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .chase_db1 import ChaseDB1Dataset
from .cityscapes import CityscapesDataset
from .custom import CustomDataset
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .drive import DRIVEDataset
from .hrf import HRFDataset
from .pascal_context import PascalContextDataset, PascalContextDataset59
from .stare import STAREDataset
from .voc import PascalVOCDataset

__all__ = [
    'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
    'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset',
    'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset',
    'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
    'STAREDataset'
]
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/ade.py
ADDED
@@ -0,0 +1,84 @@
from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class ADE20KDataset(CustomDataset):
    """ADE20K dataset.

    In segmentation map annotation for ADE20K, 0 stands for background, which
    is not included in 150 categories. ``reduce_zero_label`` is fixed to True.
    The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    CLASSES = (
        'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
        'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
        'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
        'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
        'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
        'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
        'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
        'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
        'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
        'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
        'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
        'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
        'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
        'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
        'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
        'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
        'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
        'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
        'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
        'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
        'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
        'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
        'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
        'clock', 'flag')

    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
               [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
               [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
               [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
               [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
               [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
               [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
               [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
               [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
               [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
               [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
               [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
               [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
               [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
               [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
               [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
               [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
               [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
               [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
               [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
               [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
               [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
               [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
               [102, 255, 0], [92, 0, 255]]

    def __init__(self, **kwargs):
        super(ADE20KDataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            reduce_zero_label=True,
            **kwargs)
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/builder.py
ADDED
@@ -0,0 +1,169 @@
import copy
import platform
import random
from functools import partial

import numpy as np
from annotator.uniformer.mmcv.parallel import collate
from annotator.uniformer.mmcv.runner import get_dist_info
from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
from annotator.uniformer.mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
from torch.utils.data import DistributedSampler

if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))

DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')


def _concat_dataset(cfg, default_args=None):
    """Build :obj:`ConcatDataset by."""
    from .dataset_wrappers import ConcatDataset
    img_dir = cfg['img_dir']
    ann_dir = cfg.get('ann_dir', None)
    split = cfg.get('split', None)
    num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1
    if ann_dir is not None:
        num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1
    else:
        num_ann_dir = 0
    if split is not None:
        num_split = len(split) if isinstance(split, (list, tuple)) else 1
    else:
        num_split = 0
    if num_img_dir > 1:
        assert num_img_dir == num_ann_dir or num_ann_dir == 0
        assert num_img_dir == num_split or num_split == 0
    else:
        assert num_split == num_ann_dir or num_ann_dir <= 1
    num_dset = max(num_split, num_img_dir)

    datasets = []
    for i in range(num_dset):
        data_cfg = copy.deepcopy(cfg)
        if isinstance(img_dir, (list, tuple)):
            data_cfg['img_dir'] = img_dir[i]
        if isinstance(ann_dir, (list, tuple)):
            data_cfg['ann_dir'] = ann_dir[i]
        if isinstance(split, (list, tuple)):
            data_cfg['split'] = split[i]
        datasets.append(build_dataset(data_cfg, default_args))

    return ConcatDataset(datasets)


def build_dataset(cfg, default_args=None):
    """Build datasets."""
    from .dataset_wrappers import ConcatDataset, RepeatDataset
    if isinstance(cfg, (list, tuple)):
        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
    elif cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
            cfg.get('split', None), (list, tuple)):
        dataset = _concat_dataset(cfg, default_args)
    else:
        dataset = build_from_cfg(cfg, DATASETS, default_args)

    return dataset


def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     dataloader_type='PoolDataLoader',
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Seed to be used. Default: None.
        drop_last (bool): Whether to drop the last incomplete batch in epoch.
            Default: False
        pin_memory (bool): Whether to use pin_memory in DataLoader.
            Default: True
        dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        sampler = DistributedSampler(
            dataset, world_size, rank, shuffle=shuffle)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    init_fn = partial(
        worker_init_fn, num_workers=num_workers, rank=rank,
        seed=seed) if seed is not None else None

    assert dataloader_type in (
        'DataLoader',
        'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'

    if dataloader_type == 'PoolDataLoader':
        dataloader = PoolDataLoader
    elif dataloader_type == 'DataLoader':
        dataloader = DataLoader

    data_loader = dataloader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)

    return data_loader


def worker_init_fn(worker_id, num_workers, rank, seed):
    """Worker init func for dataloader.

    The seed of each worker equals to num_worker * rank + worker_id + user_seed

    Args:
        worker_id (int): Worker id.
        num_workers (int): Number of workers.
        rank (int): The rank of current process.
        seed (int): The random seed to use.
    """

    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
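A sketch of how these builders compose (illustrative, not part of the upload; the paths are hypothetical, and the pipeline entries come from the pipelines package added in this same commit). A 'RepeatDataset' config is dispatched by the elif branch in build_dataset before any registry lookup; everything else resolves 'type' against DATASETS via build_from_cfg:

from annotator.uniformer.mmseg.datasets import build_dataloader, build_dataset

cfg = dict(
    type='RepeatDataset',
    times=4,  # wrapped by dataset_wrappers.RepeatDataset
    dataset=dict(
        type='ADE20KDataset',  # resolved against the DATASETS registry
        data_root='data/ade/ADEChallengeData2016',  # hypothetical path
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=[dict(type='LoadImageFromFile'),
                  dict(type='LoadAnnotations', reduce_zero_label=True)]))
dataset = build_dataset(cfg)
loader = build_dataloader(dataset, samples_per_gpu=2, workers_per_gpu=2,
                          dist=False, seed=0, dataloader_type='DataLoader')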
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/chase_db1.py
ADDED
@@ -0,0 +1,27 @@
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class ChaseDB1Dataset(CustomDataset):
    """Chase_db1 dataset.

    In segmentation map annotation for Chase_db1, 0 stands for background,
    which is included in 2 categories. ``reduce_zero_label`` is fixed to False.
    The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '_1stHO.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super(ChaseDB1Dataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='_1stHO.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/cityscapes.py
ADDED
@@ -0,0 +1,217 @@
import os.path as osp
import tempfile

import annotator.uniformer.mmcv as mmcv
import numpy as np
from annotator.uniformer.mmcv.utils import print_log
from PIL import Image

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class CityscapesDataset(CustomDataset):
    """Cityscapes dataset.

    The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
    fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
    """

    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
               'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
               'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
               'bicycle')

    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
               [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
               [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
               [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
               [0, 80, 100], [0, 0, 230], [119, 11, 32]]

    def __init__(self, **kwargs):
        super(CityscapesDataset, self).__init__(
            img_suffix='_leftImg8bit.png',
            seg_map_suffix='_gtFine_labelTrainIds.png',
            **kwargs)

    @staticmethod
    def _convert_to_label_id(result):
        """Convert trainId to id for cityscapes."""
        if isinstance(result, str):
            result = np.load(result)
        import cityscapesscripts.helpers.labels as CSLabels
        result_copy = result.copy()
        for trainId, label in CSLabels.trainId2label.items():
            result_copy[result == trainId] = label.id

        return result_copy

    def results2img(self, results, imgfile_prefix, to_label_id):
        """Write the segmentation results to images.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            imgfile_prefix (str): The filename prefix of the png files.
                If the prefix is "somepath/xxx",
                the png files will be named "somepath/xxx.png".
            to_label_id (bool): whether convert output to label_id for
                submission

        Returns:
            list[str: str]: result txt files which contains corresponding
            semantic segmentation images.
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            if to_label_id:
                result = self._convert_to_label_id(result)
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]

            png_filename = osp.join(imgfile_prefix, f'{basename}.png')

            output = Image.fromarray(result.astype(np.uint8)).convert('P')
            import cityscapesscripts.helpers.labels as CSLabels
            palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
            for label_id, label in CSLabels.id2label.items():
                palette[label_id] = label.color

            output.putpalette(palette)
            output.save(png_filename)
            result_files.append(png_filename)
            prog_bar.update()

        return result_files

    def format_results(self, results, imgfile_prefix=None, to_label_id=True):
        """Format the results into dir (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            imgfile_prefix (str | None): The prefix of images files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Default: None.
            to_label_id (bool): whether convert output to label_id for
                submission. Default: False

        Returns:
            tuple: (result_files, tmp_dir), result_files is a list containing
                the image paths, tmp_dir is the temporal directory created
                for saving json/png files when img_prefix is not specified.
        """

        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: '
            f'{len(results)} != {len(self)}')

        if imgfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            imgfile_prefix = tmp_dir.name
        else:
            tmp_dir = None
        result_files = self.results2img(results, imgfile_prefix, to_label_id)

        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 imgfile_prefix=None,
                 efficient_test=False):
        """Evaluation in Cityscapes/default protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file,
                for cityscapes evaluation only. It includes the file path and
                the prefix of filename, e.g., "a/b/prefix".
                If results are evaluated with cityscapes protocol, it would be
                the prefix of output png files. The output files would be
                png images under folder "a/b/prefix/xxx.png", where "xxx" is
                the image name of cityscapes. If not specified, a temp file
                will be created for evaluation.
                Default: None.

        Returns:
            dict[str, float]: Cityscapes/default metrics.
        """

        eval_results = dict()
        metrics = metric.copy() if isinstance(metric, list) else [metric]
        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, logger, imgfile_prefix))
            metrics.remove('cityscapes')
        if len(metrics) > 0:
            eval_results.update(
                super(CityscapesDataset,
                      self).evaluate(results, metrics, logger, efficient_test))

        return eval_results

    def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file

        Returns:
            dict[str: float]: Cityscapes evaluation results.
        """
        try:
            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to '
                              'install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        result_files, tmp_dir = self.format_results(results, imgfile_prefix)

        if tmp_dir is None:
            result_dir = imgfile_prefix
        else:
            result_dir = tmp_dir.name

        eval_results = dict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)

        CSEval.args.evalInstLevelScore = True
        CSEval.args.predictionPath = osp.abspath(result_dir)
        CSEval.args.evalPixelAccuracy = True
        CSEval.args.JSONOutput = False

        seg_map_list = []
        pred_list = []

        # when evaluating with official cityscapesscripts,
        # **_gtFine_labelIds.png is used
        for seg_map in mmcv.scandir(
                self.ann_dir, 'gtFine_labelIds.png', recursive=True):
            seg_map_list.append(osp.join(self.ann_dir, seg_map))
            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))

        eval_results.update(
            CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))

        if tmp_dir is not None:
            tmp_dir.cleanup()

        return eval_results
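A sketch of the two evaluation paths above (illustrative; `dataset` stands for a built CityscapesDataset and `results` for a list with one HxW trainId prediction per image):

# 'cityscapes' is routed to _evaluate_cityscapes (official cityscapesscripts);
# the remaining metrics fall through to CustomDataset.evaluate.
metrics = dataset.evaluate(results, metric=['mIoU', 'cityscapes'],
                           imgfile_prefix='work_dirs/format_results')  # hypothetical output prefix
print(metrics['mIoU'])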
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/custom.py
ADDED
@@ -0,0 +1,400 @@
import os
import os.path as osp
from collections import OrderedDict
from functools import reduce

import annotator.uniformer.mmcv as mmcv
import numpy as np
from annotator.uniformer.mmcv.utils import print_log
from prettytable import PrettyTable
from torch.utils.data import Dataset

from annotator.uniformer.mmseg.core import eval_metrics
from annotator.uniformer.mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose


@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for semantic segmentation. An example of file structure
    is as followed.

    .. code-block:: none

        ├── data
        │   ├── my_dataset
        │   │   ├── img_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{img_suffix}
        │   │   │   │   ├── yyy{img_suffix}
        │   │   │   │   ├── zzz{img_suffix}
        │   │   │   ├── val
        │   │   ├── ann_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{seg_map_suffix}
        │   │   │   │   ├── yyy{seg_map_suffix}
        │   │   │   │   ├── zzz{seg_map_suffix}
        │   │   │   ├── val

    The img/gt_semantic_seg pair of CustomDataset should be of the same
    except suffix. A valid img/gt_semantic_seg filename pair should be like
    ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
    in the suffix). If split is given, then ``xxx`` is specified in txt file.
    Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
    Please refer to ``docs/tutorials/new_dataset.md`` for more details.


    Args:
        pipeline (list[dict]): Processing pipeline
        img_dir (str): Path to image directory
        img_suffix (str): Suffix of images. Default: '.jpg'
        ann_dir (str, optional): Path to annotation directory. Default: None
        seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
        split (str, optional): Split txt file. If split is specified, only
            file with suffix in the splits will be loaded. Otherwise, all
            images in img_dir/ann_dir will be loaded. Default: None
        data_root (str, optional): Data root for img_dir/ann_dir. Default:
            None.
        test_mode (bool): If test_mode=True, gt wouldn't be loaded.
        ignore_index (int): The label index to be ignored. Default: 255
        reduce_zero_label (bool): Whether to mark label zero as ignored.
            Default: False
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        palette (Sequence[Sequence[int]]] | np.ndarray | None):
            The palette of segmentation map. If None is given, and
            self.PALETTE is None, random palette will be generated.
            Default: None
    """

    CLASSES = None

    PALETTE = None

    def __init__(self,
                 pipeline,
                 img_dir,
                 img_suffix='.jpg',
                 ann_dir=None,
                 seg_map_suffix='.png',
                 split=None,
                 data_root=None,
                 test_mode=False,
                 ignore_index=255,
                 reduce_zero_label=False,
                 classes=None,
                 palette=None):
        self.pipeline = Compose(pipeline)
        self.img_dir = img_dir
        self.img_suffix = img_suffix
        self.ann_dir = ann_dir
        self.seg_map_suffix = seg_map_suffix
        self.split = split
        self.data_root = data_root
        self.test_mode = test_mode
        self.ignore_index = ignore_index
        self.reduce_zero_label = reduce_zero_label
        self.label_map = None
        self.CLASSES, self.PALETTE = self.get_classes_and_palette(
            classes, palette)

        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.img_dir):
                self.img_dir = osp.join(self.data_root, self.img_dir)
            if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
                self.ann_dir = osp.join(self.data_root, self.ann_dir)
            if not (self.split is None or osp.isabs(self.split)):
                self.split = osp.join(self.data_root, self.split)

        # load annotations
        self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
                                               self.ann_dir,
                                               self.seg_map_suffix, self.split)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.img_infos)

    def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
                         split):
        """Load annotation from directory.

        Args:
            img_dir (str): Path to image directory
            img_suffix (str): Suffix of images.
            ann_dir (str|None): Path to annotation directory.
            seg_map_suffix (str|None): Suffix of segmentation maps.
            split (str|None): Split txt file. If split is specified, only file
                with suffix in the splits will be loaded. Otherwise, all images
                in img_dir/ann_dir will be loaded. Default: None

        Returns:
            list[dict]: All image info of dataset.
        """

        img_infos = []
        if split is not None:
            with open(split) as f:
                for line in f:
                    img_name = line.strip()
                    img_info = dict(filename=img_name + img_suffix)
                    if ann_dir is not None:
                        seg_map = img_name + seg_map_suffix
                        img_info['ann'] = dict(seg_map=seg_map)
                    img_infos.append(img_info)
        else:
            for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
                img_info = dict(filename=img)
                if ann_dir is not None:
                    seg_map = img.replace(img_suffix, seg_map_suffix)
                    img_info['ann'] = dict(seg_map=seg_map)
                img_infos.append(img_info)

        print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
        return img_infos

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """

        return self.img_infos[idx]['ann']

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['seg_fields'] = []
        results['img_prefix'] = self.img_dir
        results['seg_prefix'] = self.ann_dir
        if self.custom_classes:
            results['label_map'] = self.label_map

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                False).
        """

        if self.test_mode:
            return self.prepare_test_img(idx)
        else:
            return self.prepare_train_img(idx)

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """

        img_info = self.img_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """

        img_info = self.img_infos[idx]
        results = dict(img_info=img_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""

    def get_gt_seg_maps(self, efficient_test=False):
        """Get ground truth segmentation maps for evaluation."""
        gt_seg_maps = []
        for img_info in self.img_infos:
            seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])
            if efficient_test:
                gt_seg_map = seg_map
            else:
                gt_seg_map = mmcv.imread(
                    seg_map, flag='unchanged', backend='pillow')
            gt_seg_maps.append(gt_seg_map)
        return gt_seg_maps

    def get_classes_and_palette(self, classes=None, palette=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.
            palette (Sequence[Sequence[int]]] | np.ndarray | None):
                The palette of segmentation map. If None is given, random
                palette will be generated. Default: None
        """
        if classes is None:
            self.custom_classes = False
            return self.CLASSES, self.PALETTE

        self.custom_classes = True
        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        if self.CLASSES:
            if not set(classes).issubset(self.CLASSES):
                raise ValueError('classes is not a subset of CLASSES.')

            # dictionary, its keys are the old label ids and its values
            # are the new label ids.
            # used for changing pixel labels in load_annotations.
            self.label_map = {}
            for i, c in enumerate(self.CLASSES):
                if c not in class_names:
                    self.label_map[i] = -1
                else:
                    self.label_map[i] = classes.index(c)

        palette = self.get_palette_for_custom_classes(class_names, palette)

        return class_names, palette

    def get_palette_for_custom_classes(self, class_names, palette=None):

        if self.label_map is not None:
            # return subset of palette
            palette = []
            for old_id, new_id in sorted(
                    self.label_map.items(), key=lambda x: x[1]):
                if new_id != -1:
                    palette.append(self.PALETTE[old_id])
            palette = type(self.PALETTE)(palette)

        elif palette is None:
            if self.PALETTE is None:
                palette = np.random.randint(0, 255, size=(len(class_names), 3))
            else:
                palette = self.PALETTE

        return palette

    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 efficient_test=False,
                 **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. 'mIoU',
                'mDice' and 'mFscore' are supported.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str, float]: Default metrics.
        """

        if isinstance(metric, str):
            metric = [metric]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore']
        if not set(metric).issubset(set(allowed_metrics)):
            raise KeyError('metric {} is not supported'.format(metric))
        eval_results = {}
        gt_seg_maps = self.get_gt_seg_maps(efficient_test)
        if self.CLASSES is None:
            num_classes = len(
                reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
        else:
            num_classes = len(self.CLASSES)
        ret_metrics = eval_metrics(
            results,
            gt_seg_maps,
            num_classes,
            self.ignore_index,
            metric,
            label_map=self.label_map,
            reduce_zero_label=self.reduce_zero_label)

        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES

        # summary table
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })

        # each class table
        ret_metrics.pop('aAcc', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)

        # for logger
        class_table_data = PrettyTable()
        for key, val in ret_metrics_class.items():
            class_table_data.add_column(key, val)

        summary_table_data = PrettyTable()
        for key, val in ret_metrics_summary.items():
            if key == 'aAcc':
                summary_table_data.add_column(key, [val])
            else:
                summary_table_data.add_column('m' + key, [val])

        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)
        print_log('Summary:', logger)
        print_log('\n' + summary_table_data.get_string(), logger=logger)

        # each metric dict
        for key, value in ret_metrics_summary.items():
            if key == 'aAcc':
                eval_results[key] = value / 100.0
            else:
                eval_results['m' + key] = value / 100.0

        ret_metrics_class.pop('Class', None)
        for key, value in ret_metrics_class.items():
            eval_results.update({
                key + '.' + str(name): value[idx] / 100.0
                for idx, name in enumerate(class_names)
            })

        if mmcv.is_list_of(results, str):
            for file_name in results:
                os.remove(file_name)
        return eval_results
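A sketch of instantiating CustomDataset directly against the layout in its docstring (illustrative; the directory names are hypothetical, and the pipeline entries come from the pipelines package in this commit):

from annotator.uniformer.mmseg.datasets import CustomDataset

dataset = CustomDataset(
    pipeline=[dict(type='LoadImageFromFile'), dict(type='LoadAnnotations')],
    data_root='data/my_dataset',  # hypothetical path
    img_dir='img_dir/train',
    ann_dir='ann_dir/train',
    img_suffix='.jpg',
    seg_map_suffix='.png',
    split=None)      # split=None: load_annotations pairs every img_dir/*.jpg with ann_dir/*.png by stem
print(len(dataset))  # number of img/gt_semantic_seg pairs found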
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/dataset_wrappers.py
ADDED
@@ -0,0 +1,50 @@
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset

from .builder import DATASETS


@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        self.PALETTE = datasets[0].PALETTE


@DATASETS.register_module()
class RepeatDataset(object):
    """A wrapper of repeated dataset.

    The length of repeated dataset will be `times` larger than the original
    dataset. This is useful when the data loading time is long but the dataset
    is small. Using RepeatDataset can reduce the data loading time between
    epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        self.PALETTE = dataset.PALETTE
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        """Get item from original dataset."""
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        """The length is multiplied by ``times``"""
        return self.times * self._ori_len
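The wrapper semantics in one sketch (illustrative; `base` stands for any dataset built above):

from annotator.uniformer.mmseg.datasets import RepeatDataset

wrapped = RepeatDataset(base, times=3)
assert len(wrapped) == 3 * len(base)  # __len__ multiplies by times
item = wrapped[len(base) + 1]         # __getitem__ wraps modulo _ori_len, i.e. same item as base[1]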
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/drive.py
ADDED
@@ -0,0 +1,27 @@
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class DRIVEDataset(CustomDataset):
    """DRIVE dataset.

    In segmentation map annotation for DRIVE, 0 stands for background, which is
    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '_manual1.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super(DRIVEDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='_manual1.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/hrf.py
ADDED
@@ -0,0 +1,27 @@
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class HRFDataset(CustomDataset):
    """HRF dataset.

    In segmentation map annotation for HRF, 0 stands for background, which is
    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super(HRFDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pascal_context.py
ADDED
@@ -0,0 +1,103 @@
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class PascalContextDataset(CustomDataset):
    """PascalContext dataset.

    In segmentation map annotation for PascalContext, 0 stands for background,
    which is included in 60 categories. ``reduce_zero_label`` is fixed to
    False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
    fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """

    CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench',
               'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
               'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
               'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
               'floor', 'flower', 'food', 'grass', 'ground', 'horse',
               'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person',
               'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep',
               'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table',
               'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water',
               'window', 'wood')

    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]

    def __init__(self, split, **kwargs):
        super(PascalContextDataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            split=split,
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir) and self.split is not None


@DATASETS.register_module()
class PascalContextDataset59(CustomDataset):
    """PascalContext dataset with the background class removed.

    In segmentation map annotation for PascalContext, 0 stands for background.
    Here ``reduce_zero_label`` is fixed to True, so the 59 foreground classes
    are kept. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
    fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """

    CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
               'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet',
               'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow',
               'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower',
               'food', 'grass', 'ground', 'horse', 'keyboard', 'light',
               'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform',
               'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk',
               'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train',
               'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')

    PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
               [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
               [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
               [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
               [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
               [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
               [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
               [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
               [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
               [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
               [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
               [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
               [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
               [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
               [0, 235, 255], [0, 173, 255], [31, 0, 255]]

    def __init__(self, split, **kwargs):
        super(PascalContextDataset59, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            split=split,
            reduce_zero_label=True,
            **kwargs)
        assert osp.exists(self.img_dir) and self.split is not None
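A small illustration (not part of the upload) of what ``reduce_zero_label=True`` changes between the two registrations above: the 60-class variant keeps 'background' as label 0, while the 59-class variant maps it to the ignore index 255 and shifts every foreground label down by one, mirroring the reduction LoadAnnotations applies:

import numpy as np

seg = np.array([0, 1, 2, 59], dtype=np.uint8)  # background, aeroplane, bag, wood
seg[seg == 0] = 255    # background becomes the ignore index
seg = seg - 1          # every remaining label shifts down by one
seg[seg == 254] = 255  # 255 - 1 = 254 is folded back to the ignore index
print(seg)             # [255   0   1  58] -> 'aeroplane' is now class 0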
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/__init__.py
ADDED
@@ -0,0 +1,16 @@
from .compose import Compose
from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
                        Transpose, to_tensor)
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
                         PhotoMetricDistortion, RandomCrop, RandomFlip,
                         RandomRotate, Rerange, Resize, RGB2Gray, SegRescale)

__all__ = [
    'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
    'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
    'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
    'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
    'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray'
]
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/compose.py
ADDED
@@ -0,0 +1,51 @@
import collections

from annotator.uniformer.mmcv.utils import build_from_cfg

from ..builder import PIPELINES


@PIPELINES.register_module()
class Compose(object):
    """Compose multiple transforms sequentially.

    Args:
        transforms (Sequence[dict | callable]): Sequence of transform objects
            or config dicts to be composed.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for transform in transforms:
            if isinstance(transform, dict):
                transform = build_from_cfg(transform, PIPELINES)
                self.transforms.append(transform)
            elif callable(transform):
                self.transforms.append(transform)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        """Call function to apply transforms sequentially.

        Args:
            data (dict): A result dict contains the data to transform.

        Returns:
            dict: Transformed data.
        """

        for t in self.transforms:
            data = t(data)
            if data is None:
                return None
        return data

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += f'    {t}'
        format_string += '\n)'
        return format_string
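A dependency-free sketch of Compose's two contracts, with plain callables standing in for registered transforms (so build_from_cfg is not needed): transforms chain left to right, and a None return vetoes the sample:

def add_one(data):
    data['x'] += 1
    return data

def drop_negative(data):
    return None if data['x'] < 0 else data

def run(data, transforms):          # mirrors the loop in Compose.__call__
    for t in transforms:
        data = t(data)
        if data is None:
            return None
    return data

print(run({'x': 1}, [add_one, drop_negative]))    # {'x': 2}
print(run({'x': -5}, [add_one, drop_negative]))   # None: the sample is dropped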
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/formating.py
ADDED
@@ -0,0 +1,288 @@
from collections.abc import Sequence

import annotator.uniformer.mmcv as mmcv
import numpy as np
import torch
from annotator.uniformer.mmcv.parallel import DataContainer as DC

from ..builder import PIPELINES


def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.
    """

    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError(f'type {type(data)} cannot be converted to tensor.')
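Assuming the ``to_tensor`` above is in scope, each branch of the dispatch produces a tensor as follows (a quick illustration, not part of the upload):

import numpy as np
import torch

print(to_tensor(np.zeros((2, 3))).shape)  # torch.Size([2, 3]), via from_numpy
print(to_tensor([1, 2, 3]))               # tensor([1, 2, 3]), via torch.tensor
print(to_tensor(7))                       # tensor([7]): scalars gain a length-1 dim
print(to_tensor(0.5))                     # tensor([0.5000]), a FloatTensor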
+
@PIPELINES.register_module()
|
| 37 |
+
class ToTensor(object):
|
| 38 |
+
"""Convert some results to :obj:`torch.Tensor` by given keys.
|
| 39 |
+
|
| 40 |
+
Args:
|
| 41 |
+
keys (Sequence[str]): Keys that need to be converted to Tensor.
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
def __init__(self, keys):
|
| 45 |
+
self.keys = keys
|
| 46 |
+
|
| 47 |
+
def __call__(self, results):
|
| 48 |
+
"""Call function to convert data in results to :obj:`torch.Tensor`.
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
results (dict): Result dict contains the data to convert.
|
| 52 |
+
|
| 53 |
+
Returns:
|
| 54 |
+
dict: The result dict contains the data converted
|
| 55 |
+
to :obj:`torch.Tensor`.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
for key in self.keys:
|
| 59 |
+
results[key] = to_tensor(results[key])
|
| 60 |
+
return results
|
| 61 |
+
|
| 62 |
+
def __repr__(self):
|
| 63 |
+
return self.__class__.__name__ + f'(keys={self.keys})'
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@PIPELINES.register_module()
|
| 67 |
+
class ImageToTensor(object):
|
| 68 |
+
"""Convert image to :obj:`torch.Tensor` by given keys.
|
| 69 |
+
|
| 70 |
+
The dimension order of input image is (H, W, C). The pipeline will convert
|
| 71 |
+
it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
|
| 72 |
+
(1, H, W).
|
| 73 |
+
|
| 74 |
+
Args:
|
| 75 |
+
keys (Sequence[str]): Key of images to be converted to Tensor.
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
def __init__(self, keys):
|
| 79 |
+
self.keys = keys
|
| 80 |
+
|
| 81 |
+
def __call__(self, results):
|
| 82 |
+
"""Call function to convert image in results to :obj:`torch.Tensor` and
|
| 83 |
+
transpose the channel order.
|
| 84 |
+
|
| 85 |
+
Args:
|
| 86 |
+
results (dict): Result dict contains the image data to convert.
|
| 87 |
+
|
| 88 |
+
Returns:
|
| 89 |
+
dict: The result dict contains the image converted
|
| 90 |
+
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
|
| 91 |
+
"""
|
| 92 |
+
|
| 93 |
+
for key in self.keys:
|
| 94 |
+
img = results[key]
|
| 95 |
+
if len(img.shape) < 3:
|
| 96 |
+
img = np.expand_dims(img, -1)
|
| 97 |
+
results[key] = to_tensor(img.transpose(2, 0, 1))
|
| 98 |
+
return results
|
| 99 |
+
|
| 100 |
+
def __repr__(self):
|
| 101 |
+
return self.__class__.__name__ + f'(keys={self.keys})'
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@PIPELINES.register_module()
|
| 105 |
+
class Transpose(object):
|
| 106 |
+
"""Transpose some results by given keys.
|
| 107 |
+
|
| 108 |
+
Args:
|
| 109 |
+
keys (Sequence[str]): Keys of results to be transposed.
|
| 110 |
+
order (Sequence[int]): Order of transpose.
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
def __init__(self, keys, order):
|
| 114 |
+
self.keys = keys
|
| 115 |
+
self.order = order
|
| 116 |
+
|
| 117 |
+
def __call__(self, results):
|
| 118 |
+
"""Call function to convert image in results to :obj:`torch.Tensor` and
|
| 119 |
+
transpose the channel order.
|
| 120 |
+
|
| 121 |
+
Args:
|
| 122 |
+
results (dict): Result dict contains the image data to convert.
|
| 123 |
+
|
| 124 |
+
Returns:
|
| 125 |
+
dict: The result dict contains the image converted
|
| 126 |
+
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
for key in self.keys:
|
| 130 |
+
results[key] = results[key].transpose(self.order)
|
| 131 |
+
return results
|
| 132 |
+
|
| 133 |
+
def __repr__(self):
|
| 134 |
+
return self.__class__.__name__ + \
|
| 135 |
+
f'(keys={self.keys}, order={self.order})'
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
@PIPELINES.register_module()
|
| 139 |
+
class ToDataContainer(object):
|
| 140 |
+
"""Convert results to :obj:`mmcv.DataContainer` by given fields.
|
| 141 |
+
|
| 142 |
+
Args:
|
| 143 |
+
fields (Sequence[dict]): Each field is a dict like
|
| 144 |
+
``dict(key='xxx', **kwargs)``. The ``key`` in result will
|
| 145 |
+
be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
|
| 146 |
+
Default: ``(dict(key='img', stack=True),
|
| 147 |
+
dict(key='gt_semantic_seg'))``.
|
| 148 |
+
"""
|
| 149 |
+
|
| 150 |
+
def __init__(self,
|
| 151 |
+
fields=(dict(key='img',
|
| 152 |
+
stack=True), dict(key='gt_semantic_seg'))):
|
| 153 |
+
self.fields = fields
|
| 154 |
+
|
| 155 |
+
def __call__(self, results):
|
| 156 |
+
"""Call function to convert data in results to
|
| 157 |
+
:obj:`mmcv.DataContainer`.
|
| 158 |
+
|
| 159 |
+
Args:
|
| 160 |
+
results (dict): Result dict contains the data to convert.
|
| 161 |
+
|
| 162 |
+
Returns:
|
| 163 |
+
dict: The result dict contains the data converted to
|
| 164 |
+
:obj:`mmcv.DataContainer`.
|
| 165 |
+
"""
|
| 166 |
+
|
| 167 |
+
for field in self.fields:
|
| 168 |
+
field = field.copy()
|
| 169 |
+
key = field.pop('key')
|
| 170 |
+
results[key] = DC(results[key], **field)
|
| 171 |
+
return results
|
| 172 |
+
|
| 173 |
+
def __repr__(self):
|
| 174 |
+
return self.__class__.__name__ + f'(fields={self.fields})'
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@PIPELINES.register_module()
|
| 178 |
+
class DefaultFormatBundle(object):
|
| 179 |
+
"""Default formatting bundle.
|
| 180 |
+
|
| 181 |
+
It simplifies the pipeline of formatting common fields, including "img"
|
| 182 |
+
and "gt_semantic_seg". These fields are formatted as follows.
|
| 183 |
+
|
| 184 |
+
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
|
| 185 |
+
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
|
| 186 |
+
(3)to DataContainer (stack=True)
|
| 187 |
+
"""
|
| 188 |
+
|
| 189 |
+
def __call__(self, results):
|
| 190 |
+
"""Call function to transform and format common fields in results.
|
| 191 |
+
|
| 192 |
+
Args:
|
| 193 |
+
results (dict): Result dict contains the data to convert.
|
| 194 |
+
|
| 195 |
+
Returns:
|
| 196 |
+
dict: The result dict contains the data that is formatted with
|
| 197 |
+
default bundle.
|
| 198 |
+
"""
|
| 199 |
+
|
| 200 |
+
if 'img' in results:
|
| 201 |
+
img = results['img']
|
| 202 |
+
if len(img.shape) < 3:
|
| 203 |
+
img = np.expand_dims(img, -1)
|
| 204 |
+
img = np.ascontiguousarray(img.transpose(2, 0, 1))
|
| 205 |
+
results['img'] = DC(to_tensor(img), stack=True)
|
| 206 |
+
if 'gt_semantic_seg' in results:
|
| 207 |
+
# convert to long
|
| 208 |
+
results['gt_semantic_seg'] = DC(
|
| 209 |
+
to_tensor(results['gt_semantic_seg'][None,
|
| 210 |
+
...].astype(np.int64)),
|
| 211 |
+
stack=True)
|
| 212 |
+
return results
|
| 213 |
+
|
| 214 |
+
def __repr__(self):
|
| 215 |
+
return self.__class__.__name__
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@PIPELINES.register_module()
|
| 219 |
+
class Collect(object):
|
| 220 |
+
"""Collect data from the loader relevant to the specific task.
|
| 221 |
+
|
| 222 |
+
This is usually the last stage of the data loader pipeline. Typically keys
|
| 223 |
+
is set to some subset of "img", "gt_semantic_seg".
|
| 224 |
+
|
| 225 |
+
The "img_meta" item is always populated. The contents of the "img_meta"
|
| 226 |
+
dictionary depends on "meta_keys". By default this includes:
|
| 227 |
+
|
| 228 |
+
- "img_shape": shape of the image input to the network as a tuple
|
| 229 |
+
(h, w, c). Note that images may be zero padded on the bottom/right
|
| 230 |
+
if the batch tensor is larger than this shape.
|
| 231 |
+
|
| 232 |
+
- "scale_factor": a float indicating the preprocessing scale
|
| 233 |
+
|
| 234 |
+
- "flip": a boolean indicating if image flip transform was used
|
| 235 |
+
|
| 236 |
+
- "filename": path to the image file
|
| 237 |
+
|
| 238 |
+
- "ori_shape": original shape of the image as a tuple (h, w, c)
|
| 239 |
+
|
| 240 |
+
- "pad_shape": image shape after padding
|
| 241 |
+
|
| 242 |
+
- "img_norm_cfg": a dict of normalization information:
|
| 243 |
+
- mean - per channel mean subtraction
|
| 244 |
+
- std - per channel std divisor
|
| 245 |
+
- to_rgb - bool indicating if bgr was converted to rgb
|
| 246 |
+
|
| 247 |
+
Args:
|
| 248 |
+
keys (Sequence[str]): Keys of results to be collected in ``data``.
|
| 249 |
+
meta_keys (Sequence[str], optional): Meta keys to be converted to
|
| 250 |
+
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
|
| 251 |
+
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
|
| 252 |
+
'pad_shape', 'scale_factor', 'flip', 'flip_direction',
|
| 253 |
+
'img_norm_cfg')``
|
| 254 |
+
"""
|
| 255 |
+
|
| 256 |
+
def __init__(self,
|
| 257 |
+
keys,
|
| 258 |
+
meta_keys=('filename', 'ori_filename', 'ori_shape',
|
| 259 |
+
'img_shape', 'pad_shape', 'scale_factor', 'flip',
|
| 260 |
+
'flip_direction', 'img_norm_cfg')):
|
| 261 |
+
self.keys = keys
|
| 262 |
+
self.meta_keys = meta_keys
|
| 263 |
+
|
| 264 |
+
def __call__(self, results):
|
| 265 |
+
"""Call function to collect keys in results. The keys in ``meta_keys``
|
| 266 |
+
will be converted to :obj:mmcv.DataContainer.
|
| 267 |
+
|
| 268 |
+
Args:
|
| 269 |
+
results (dict): Result dict contains the data to collect.
|
| 270 |
+
|
| 271 |
+
Returns:
|
| 272 |
+
dict: The result dict contains the following keys
|
| 273 |
+
- keys in``self.keys``
|
| 274 |
+
- ``img_metas``
|
| 275 |
+
"""
|
| 276 |
+
|
| 277 |
+
data = {}
|
| 278 |
+
img_meta = {}
|
| 279 |
+
for key in self.meta_keys:
|
| 280 |
+
img_meta[key] = results[key]
|
| 281 |
+
data['img_metas'] = DC(img_meta, cpu_only=True)
|
| 282 |
+
for key in self.keys:
|
| 283 |
+
data[key] = results[key]
|
| 284 |
+
return data
|
| 285 |
+
|
| 286 |
+
def __repr__(self):
|
| 287 |
+
return self.__class__.__name__ + \
|
| 288 |
+
f'(keys={self.keys}, meta_keys={self.meta_keys})'
|
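The one formatting step above that tends to surprise: ``gt_semantic_seg`` is unsqueezed to (1, H, W) and cast to int64 before wrapping. A dependency-free sketch of just that line (illustrative shapes, not from the upload):

import numpy as np
import torch

gt = np.random.randint(0, 2, size=(4, 4), dtype=np.uint8)  # an (H, W) mask
t = torch.from_numpy(gt[None, ...].astype(np.int64))       # unsqueeze dim 0, cast to long
assert t.shape == (1, 4, 4) and t.dtype == torch.int64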
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/loading.py
ADDED
@@ -0,0 +1,153 @@
import os.path as osp

import annotator.uniformer.mmcv as mmcv
import numpy as np

from ..builder import PIPELINES


@PIPELINES.register_module()
class LoadImageFromFile(object):
    """Load an image from file.

    Required keys are "img_prefix" and "img_info" (a dict that must contain the
    key "filename"). Added or updated keys are "filename", "img", "img_shape",
    "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
    "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).

    Args:
        to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is a uint8 array.
            Defaults to False.
        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
            Defaults to 'color'.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
            'cv2'
    """

    def __init__(self,
                 to_float32=False,
                 color_type='color',
                 file_client_args=dict(backend='disk'),
                 imdecode_backend='cv2'):
        self.to_float32 = to_float32
        self.color_type = color_type
        self.file_client_args = file_client_args.copy()
        self.file_client = None
        self.imdecode_backend = imdecode_backend

    def __call__(self, results):
        """Call functions to load image and get image meta information.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict contains loaded image and meta information.
        """

        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)

        if results.get('img_prefix') is not None:
            filename = osp.join(results['img_prefix'],
                                results['img_info']['filename'])
        else:
            filename = results['img_info']['filename']
        img_bytes = self.file_client.get(filename)
        img = mmcv.imfrombytes(
            img_bytes, flag=self.color_type, backend=self.imdecode_backend)
        if self.to_float32:
            img = img.astype(np.float32)

        results['filename'] = filename
        results['ori_filename'] = results['img_info']['filename']
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        # Set initial values for default meta_keys
        results['pad_shape'] = img.shape
        results['scale_factor'] = 1.0
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        results['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(to_float32={self.to_float32},'
        repr_str += f"color_type='{self.color_type}',"
        repr_str += f"imdecode_backend='{self.imdecode_backend}')"
        return repr_str


@PIPELINES.register_module()
class LoadAnnotations(object):
    """Load annotations for semantic segmentation.

    Args:
        reduce_zero_label (bool): Whether to reduce all label values by 1.
            Usually used for datasets where 0 is the background label.
            Default: False.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
            'pillow'
    """

    def __init__(self,
                 reduce_zero_label=False,
                 file_client_args=dict(backend='disk'),
                 imdecode_backend='pillow'):
        self.reduce_zero_label = reduce_zero_label
        self.file_client_args = file_client_args.copy()
        self.file_client = None
        self.imdecode_backend = imdecode_backend

    def __call__(self, results):
        """Call function to load multiple types annotations.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict contains loaded semantic segmentation annotations.
        """

        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)

        if results.get('seg_prefix', None) is not None:
            filename = osp.join(results['seg_prefix'],
                                results['ann_info']['seg_map'])
        else:
            filename = results['ann_info']['seg_map']
        img_bytes = self.file_client.get(filename)
        gt_semantic_seg = mmcv.imfrombytes(
            img_bytes, flag='unchanged',
            backend=self.imdecode_backend).squeeze().astype(np.uint8)
        # modify if custom classes
        if results.get('label_map', None) is not None:
            for old_id, new_id in results['label_map'].items():
                gt_semantic_seg[gt_semantic_seg == old_id] = new_id
        # reduce zero_label
        if self.reduce_zero_label:
            # avoid using underflow conversion
            gt_semantic_seg[gt_semantic_seg == 0] = 255
            gt_semantic_seg = gt_semantic_seg - 1
            gt_semantic_seg[gt_semantic_seg == 254] = 255
        results['gt_semantic_seg'] = gt_semantic_seg
        results['seg_fields'].append('gt_semantic_seg')
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
        repr_str += f"imdecode_backend='{self.imdecode_backend}')"
        return repr_str
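A small sketch (hypothetical ids, not part of the upload) of the ``label_map`` remapping loop above, which runs before ``reduce_zero_label``; the mapping must be chosen so a newly written id is not matched again by a later entry:

import numpy as np

gt_semantic_seg = np.array([[0, 1, 2], [2, 3, 1]], dtype=np.uint8)
label_map = {0: 255, 1: 0, 2: 1, 3: 255}   # keep classes 1 and 2, drop the rest
for old_id, new_id in label_map.items():   # same loop as in LoadAnnotations
    gt_semantic_seg[gt_semantic_seg == old_id] = new_id
print(gt_semantic_seg)   # [[255, 0, 1], [1, 255, 0]]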
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/test_time_aug.py
ADDED
@@ -0,0 +1,133 @@
import warnings

import annotator.uniformer.mmcv as mmcv

from ..builder import PIPELINES
from .compose import Compose


@PIPELINES.register_module()
class MultiScaleFlipAug(object):
    """Test-time augmentation with multiple scales and flipping.

    An example configuration is as follows:

    .. code-block::

        img_scale=(2048, 1024),
        img_ratios=[0.5, 1.0],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ]

    After MultiScaleFlipAug with the above configuration, the results are
    wrapped into lists of the same length, as follows:

    .. code-block::

        dict(
            img=[...],
            img_shape=[...],
            scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)]
            flip=[False, True, False, True]
            ...
        )

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (None | tuple | list[tuple]): Images scales for resizing.
        img_ratios (float | list[float]): Image ratios for resizing.
        flip (bool): Whether to apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal" and "vertical". If flip_direction is list,
            multiple flip augmentations will be applied.
            It has no effect when flip == False. Default: "horizontal".
    """

    def __init__(self,
                 transforms,
                 img_scale,
                 img_ratios=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        if img_ratios is not None:
            img_ratios = img_ratios if isinstance(img_ratios,
                                                  list) else [img_ratios]
            assert mmcv.is_list_of(img_ratios, float)
        if img_scale is None:
            # mode 1: given img_scale=None and a range of image ratio
            self.img_scale = None
            assert mmcv.is_list_of(img_ratios, float)
        elif isinstance(img_scale, tuple) and mmcv.is_list_of(
                img_ratios, float):
            assert len(img_scale) == 2
            # mode 2: given a scale and a range of image ratio
            self.img_scale = [(int(img_scale[0] * ratio),
                               int(img_scale[1] * ratio))
                              for ratio in img_ratios]
        else:
            # mode 3: given multiple scales
            self.img_scale = img_scale if isinstance(img_scale,
                                                     list) else [img_scale]
        assert mmcv.is_list_of(self.img_scale, tuple) or self.img_scale is None
        self.flip = flip
        self.img_ratios = img_ratios
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip
                and not any([t['type'] == 'RandomFlip' for t in transforms])):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Call function to apply test time augment transforms on results.

        Args:
            results (dict): Result dict contains the data to transform.

        Returns:
            dict[str: list]: The augmented data, where each value is wrapped
                into a list.
        """

        aug_data = []
        if self.img_scale is None and mmcv.is_list_of(self.img_ratios, float):
            h, w = results['img'].shape[:2]
            img_scale = [(int(w * ratio), int(h * ratio))
                         for ratio in self.img_ratios]
        else:
            img_scale = self.img_scale
        flip_aug = [False, True] if self.flip else [False]
        for scale in img_scale:
            for flip in flip_aug:
                for direction in self.flip_direction:
                    _results = results.copy()
                    _results['scale'] = scale
                    _results['flip'] = flip
                    _results['flip_direction'] = direction
                    data = self.transforms(_results)
                    aug_data.append(data)
        # list of dict to dict of list
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
        repr_str += f'flip_direction={self.flip_direction})'
        return repr_str
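The nested loop in ``__call__`` above emits len(img_scale) * (2 if flip else 1) * len(flip_direction) pipeline passes per sample; a standalone count for the docstring's own configuration (not part of the upload):

img_scale = [(1024, 512), (2048, 1024)]
flip, flip_direction = True, ['horizontal']
variants = [(s, f, d)
            for s in img_scale
            for f in ([False, True] if flip else [False])
            for d in flip_direction]
print(len(variants))   # 4, matching the scale/flip lists in the docstring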
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/pipelines/transforms.py
ADDED
@@ -0,0 +1,889 @@
import annotator.uniformer.mmcv as mmcv
import numpy as np
from annotator.uniformer.mmcv.utils import deprecated_api_warning, is_tuple_of
from numpy import random

from ..builder import PIPELINES


@PIPELINES.register_module()
class Resize(object):
    """Resize images & seg.

    This transform resizes the input image to some scale. If the input dict
    contains the key "scale", then the scale in the input dict is used,
    otherwise the specified scale in the init method is used.

    ``img_scale`` can be None, a tuple (single-scale) or a list of tuple
    (multi-scale). There are 4 multiscale modes:

    - ``ratio_range is not None``:
      1. When img_scale is None, img_scale is the shape of image in results
         (img_scale = results['img'].shape[:2]) and the image is resized based
         on the original size. (mode 1)
      2. When img_scale is a tuple (single-scale), randomly sample a ratio from
         the ratio range and multiply it with the image scale. (mode 2)

    - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
      scale from a range. (mode 3)

    - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
      scale from multiple scales. (mode 4)

    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image.
    """

    def __init__(self,
                 img_scale=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True):
        if img_scale is None:
            self.img_scale = None
        else:
            if isinstance(img_scale, list):
                self.img_scale = img_scale
            else:
                self.img_scale = [img_scale]
            assert mmcv.is_list_of(self.img_scale, tuple)

        if ratio_range is not None:
            # mode 1: given img_scale=None and a range of image ratio
            # mode 2: given a scale and a range of image ratio
            assert self.img_scale is None or len(self.img_scale) == 1
        else:
            # mode 3 and 4: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']

        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio

    @staticmethod
    def random_select(img_scales):
        """Randomly select an img_scale from given candidates.

        Args:
            img_scales (list[tuple]): Images scales for selection.

        Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
                where ``img_scale`` is the selected image scale and
                ``scale_idx`` is the selected index in the given candidates.
        """

        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx

    @staticmethod
    def random_sample(img_scales):
        """Randomly sample an img_scale when ``multiscale_mode=='range'``.

        Args:
            img_scales (list[tuple]): Images scale range for sampling.
                There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.

        Returns:
            (tuple, None): Returns a tuple ``(img_scale, None)``, where
                ``img_scale`` is sampled scale and None is just a placeholder
                to be consistent with :func:`random_select`.
        """

        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None

    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Randomly sample an img_scale when ``ratio_range`` is specified.

        A ratio will be randomly sampled from the range specified by
        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
        generate sampled scale.

        Args:
            img_scale (tuple): Images scale base to multiply with ratio.
            ratio_range (tuple[float]): The minimum and maximum ratio to scale
                the ``img_scale``.

        Returns:
            (tuple, None): Returns a tuple ``(scale, None)``, where
                ``scale`` is sampled ratio multiplied with ``img_scale`` and
                None is just a placeholder to be consistent with
                :func:`random_select`.
        """

        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None

    def _random_scale(self, results):
        """Randomly sample an img_scale according to ``ratio_range`` and
        ``multiscale_mode``.

        If ``ratio_range`` is specified, a ratio will be sampled and be
        multiplied with ``img_scale``.
        If multiple scales are specified by ``img_scale``, a scale will be
        sampled according to ``multiscale_mode``.
        Otherwise, single scale will be used.

        Args:
            results (dict): Result dict from :obj:`dataset`.

        Returns:
            dict: Two new keys ``scale`` and ``scale_idx`` are added into
                ``results``, which would be used by subsequent pipelines.
        """

        if self.ratio_range is not None:
            if self.img_scale is None:
                h, w = results['img'].shape[:2]
                scale, scale_idx = self.random_sample_ratio((w, h),
                                                            self.ratio_range)
            else:
                scale, scale_idx = self.random_sample_ratio(
                    self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError

        results['scale'] = scale
        results['scale_idx'] = scale_idx

    def _resize_img(self, results):
        """Resize images with ``results['scale']``."""
        if self.keep_ratio:
            img, scale_factor = mmcv.imrescale(
                results['img'], results['scale'], return_scale=True)
            # w_scale and h_scale have a minor difference;
            # a real fix should be done in mmcv.imrescale in the future
            new_h, new_w = img.shape[:2]
            h, w = results['img'].shape[:2]
            w_scale = new_w / w
            h_scale = new_h / h
        else:
            img, w_scale, h_scale = mmcv.imresize(
                results['img'], results['scale'], return_scale=True)
        scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                dtype=np.float32)
        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape  # in case that there is no padding
        results['scale_factor'] = scale_factor
        results['keep_ratio'] = self.keep_ratio

    def _resize_seg(self, results):
        """Resize semantic segmentation map with ``results['scale']``."""
        for key in results.get('seg_fields', []):
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key], results['scale'], interpolation='nearest')
            else:
                gt_seg = mmcv.imresize(
                    results[key], results['scale'], interpolation='nearest')
            results[key] = gt_seg

    def __call__(self, results):
        """Call function to resize images, bounding boxes, masks, semantic
        segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
                'keep_ratio' keys are added into result dict.
        """

        if 'scale' not in results:
            self._random_scale(results)
        self._resize_img(results)
        self._resize_seg(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(img_scale={self.img_scale}, '
                     f'multiscale_mode={self.multiscale_mode}, '
                     f'ratio_range={self.ratio_range}, '
                     f'keep_ratio={self.keep_ratio})')
        return repr_str


@PIPELINES.register_module()
class RandomFlip(object):
    """Flip the image & seg.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.

    Args:
        prob (float, optional): The flipping probability. Default: None.
        direction (str, optional): The flipping direction. Options are
            'horizontal' and 'vertical'. Default: 'horizontal'.
    """

    @deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip')
    def __init__(self, prob=None, direction='horizontal'):
        self.prob = prob
        self.direction = direction
        if prob is not None:
            assert prob >= 0 and prob <= 1
        assert direction in ['horizontal', 'vertical']

    def __call__(self, results):
        """Call function to flip bounding boxes, masks, semantic segmentation
        maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction' keys are added into
                result dict.
        """

        if 'flip' not in results:
            flip = True if np.random.rand() < self.prob else False
            results['flip'] = flip
        if 'flip_direction' not in results:
            results['flip_direction'] = self.direction
        if results['flip']:
            # flip image
            results['img'] = mmcv.imflip(
                results['img'], direction=results['flip_direction'])

            # flip segs
            for key in results.get('seg_fields', []):
                # use copy() to make numpy stride positive
                results[key] = mmcv.imflip(
                    results[key], direction=results['flip_direction']).copy()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(prob={self.prob})'


@PIPELINES.register_module()
class Pad(object):
    """Pad the image & mask.

    There are two padding modes: (1) pad to a fixed size and (2) pad to the
    minimum size that is divisible by some number.
    Added keys are "pad_shape", "pad_fixed_size" and "pad_size_divisor".

    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
    """

    def __init__(self,
                 size=None,
                 size_divisor=None,
                 pad_val=0,
                 seg_pad_val=255):
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val
        self.seg_pad_val = seg_pad_val
        # only one of size and size_divisor should be valid
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None

    def _pad_img(self, results):
        """Pad images according to ``self.size``."""
        if self.size is not None:
            padded_img = mmcv.impad(
                results['img'], shape=self.size, pad_val=self.pad_val)
        elif self.size_divisor is not None:
            padded_img = mmcv.impad_to_multiple(
                results['img'], self.size_divisor, pad_val=self.pad_val)
        results['img'] = padded_img
        results['pad_shape'] = padded_img.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor

    def _pad_seg(self, results):
        """Pad masks according to ``results['pad_shape']``."""
        for key in results.get('seg_fields', []):
            results[key] = mmcv.impad(
                results[key],
                shape=results['pad_shape'][:2],
                pad_val=self.seg_pad_val)

    def __call__(self, results):
        """Call function to pad images, masks, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """

        self._pad_img(results)
        self._pad_seg(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(size={self.size}, size_divisor={self.size_divisor}, ' \
                    f'pad_val={self.pad_val})'
        return repr_str


@PIPELINES.register_module()
class Normalize(object):
    """Normalize the image.

    Added key is "img_norm_cfg".

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB.
            Default: True.
    """

    def __init__(self, mean, std, to_rgb=True):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, results):
        """Call function to normalize images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results, 'img_norm_cfg' key is added into
                result dict.
        """

        results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std,
                                          self.to_rgb)
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(mean={self.mean}, std={self.std}, to_rgb=' \
                    f'{self.to_rgb})'
        return repr_str


@PIPELINES.register_module()
class Rerange(object):
    """Rerange the image pixel value.

    Args:
        min_value (float or int): Minimum value of the reranged image.
            Default: 0.
        max_value (float or int): Maximum value of the reranged image.
            Default: 255.
    """

    def __init__(self, min_value=0, max_value=255):
        assert isinstance(min_value, float) or isinstance(min_value, int)
        assert isinstance(max_value, float) or isinstance(max_value, int)
        assert min_value < max_value
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, results):
        """Call function to rerange images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Reranged results.
        """

        img = results['img']
        img_min_value = np.min(img)
        img_max_value = np.max(img)

        assert img_min_value < img_max_value
        # rerange to [0, 1]
        img = (img - img_min_value) / (img_max_value - img_min_value)
        # rerange to [min_value, max_value]
        img = img * (self.max_value - self.min_value) + self.min_value
        results['img'] = img

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(min_value={self.min_value}, max_value={self.max_value})'
        return repr_str


@PIPELINES.register_module()
class CLAHE(object):
    """Use CLAHE method to process the image.

    See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
    Graphics Gems, 1994:474-485.` for more information.

    Args:
        clip_limit (float): Threshold for contrast limiting. Default: 40.0.
        tile_grid_size (tuple[int]): Size of grid for histogram equalization.
            Input image will be divided into equally sized rectangular tiles.
|
| 462 |
+
It defines the number of tiles in row and column. Default: (8, 8).
|
| 463 |
+
"""
|
| 464 |
+
|
| 465 |
+
def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)):
|
| 466 |
+
assert isinstance(clip_limit, (float, int))
|
| 467 |
+
self.clip_limit = clip_limit
|
| 468 |
+
assert is_tuple_of(tile_grid_size, int)
|
| 469 |
+
assert len(tile_grid_size) == 2
|
| 470 |
+
self.tile_grid_size = tile_grid_size
|
| 471 |
+
|
| 472 |
+
def __call__(self, results):
|
| 473 |
+
"""Call function to Use CLAHE method process images.
|
| 474 |
+
|
| 475 |
+
Args:
|
| 476 |
+
results (dict): Result dict from loading pipeline.
|
| 477 |
+
|
| 478 |
+
Returns:
|
| 479 |
+
dict: Processed results.
|
| 480 |
+
"""
|
| 481 |
+
|
| 482 |
+
for i in range(results['img'].shape[2]):
|
| 483 |
+
results['img'][:, :, i] = mmcv.clahe(
|
| 484 |
+
np.array(results['img'][:, :, i], dtype=np.uint8),
|
| 485 |
+
self.clip_limit, self.tile_grid_size)
|
| 486 |
+
|
| 487 |
+
return results
|
| 488 |
+
|
| 489 |
+
def __repr__(self):
|
| 490 |
+
repr_str = self.__class__.__name__
|
| 491 |
+
repr_str += f'(clip_limit={self.clip_limit}, '\
|
| 492 |
+
f'tile_grid_size={self.tile_grid_size})'
|
| 493 |
+
return repr_str
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
@PIPELINES.register_module()
|
| 497 |
+
class RandomCrop(object):
|
| 498 |
+
"""Random crop the image & seg.
|
| 499 |
+
|
| 500 |
+
Args:
|
| 501 |
+
crop_size (tuple): Expected size after cropping, (h, w).
|
| 502 |
+
cat_max_ratio (float): The maximum ratio that single category could
|
| 503 |
+
occupy.
|
| 504 |
+
"""
|
| 505 |
+
|
| 506 |
+
def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255):
|
| 507 |
+
assert crop_size[0] > 0 and crop_size[1] > 0
|
| 508 |
+
self.crop_size = crop_size
|
| 509 |
+
self.cat_max_ratio = cat_max_ratio
|
| 510 |
+
self.ignore_index = ignore_index
|
| 511 |
+
|
| 512 |
+
def get_crop_bbox(self, img):
|
| 513 |
+
"""Randomly get a crop bounding box."""
|
| 514 |
+
margin_h = max(img.shape[0] - self.crop_size[0], 0)
|
| 515 |
+
margin_w = max(img.shape[1] - self.crop_size[1], 0)
|
| 516 |
+
offset_h = np.random.randint(0, margin_h + 1)
|
| 517 |
+
offset_w = np.random.randint(0, margin_w + 1)
|
| 518 |
+
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
|
| 519 |
+
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
|
| 520 |
+
|
| 521 |
+
return crop_y1, crop_y2, crop_x1, crop_x2
|
| 522 |
+
|
| 523 |
+
def crop(self, img, crop_bbox):
|
| 524 |
+
"""Crop from ``img``"""
|
| 525 |
+
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
|
| 526 |
+
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
|
| 527 |
+
return img
|
| 528 |
+
|
| 529 |
+
def __call__(self, results):
|
| 530 |
+
"""Call function to randomly crop images, semantic segmentation maps.
|
| 531 |
+
|
| 532 |
+
Args:
|
| 533 |
+
results (dict): Result dict from loading pipeline.
|
| 534 |
+
|
| 535 |
+
Returns:
|
| 536 |
+
dict: Randomly cropped results, 'img_shape' key in result dict is
|
| 537 |
+
updated according to crop size.
|
| 538 |
+
"""
|
| 539 |
+
|
| 540 |
+
img = results['img']
|
| 541 |
+
crop_bbox = self.get_crop_bbox(img)
|
| 542 |
+
if self.cat_max_ratio < 1.:
|
| 543 |
+
# Repeat 10 times
|
| 544 |
+
for _ in range(10):
|
| 545 |
+
seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox)
|
| 546 |
+
labels, cnt = np.unique(seg_temp, return_counts=True)
|
| 547 |
+
cnt = cnt[labels != self.ignore_index]
|
| 548 |
+
if len(cnt) > 1 and np.max(cnt) / np.sum(
|
| 549 |
+
cnt) < self.cat_max_ratio:
|
| 550 |
+
break
|
| 551 |
+
crop_bbox = self.get_crop_bbox(img)
|
| 552 |
+
|
| 553 |
+
# crop the image
|
| 554 |
+
img = self.crop(img, crop_bbox)
|
| 555 |
+
img_shape = img.shape
|
| 556 |
+
results['img'] = img
|
| 557 |
+
results['img_shape'] = img_shape
|
| 558 |
+
|
| 559 |
+
# crop semantic seg
|
| 560 |
+
for key in results.get('seg_fields', []):
|
| 561 |
+
results[key] = self.crop(results[key], crop_bbox)
|
| 562 |
+
|
| 563 |
+
return results
|
| 564 |
+
|
| 565 |
+
def __repr__(self):
|
| 566 |
+
return self.__class__.__name__ + f'(crop_size={self.crop_size})'
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
@PIPELINES.register_module()
|
| 570 |
+
class RandomRotate(object):
|
| 571 |
+
"""Rotate the image & seg.
|
| 572 |
+
|
| 573 |
+
Args:
|
| 574 |
+
prob (float): The rotation probability.
|
| 575 |
+
degree (float, tuple[float]): Range of degrees to select from. If
|
| 576 |
+
degree is a number instead of tuple like (min, max),
|
| 577 |
+
the range of degree will be (``-degree``, ``+degree``)
|
| 578 |
+
pad_val (float, optional): Padding value of image. Default: 0.
|
| 579 |
+
seg_pad_val (float, optional): Padding value of segmentation map.
|
| 580 |
+
Default: 255.
|
| 581 |
+
center (tuple[float], optional): Center point (w, h) of the rotation in
|
| 582 |
+
the source image. If not specified, the center of the image will be
|
| 583 |
+
used. Default: None.
|
| 584 |
+
auto_bound (bool): Whether to adjust the image size to cover the whole
|
| 585 |
+
rotated image. Default: False
|
| 586 |
+
"""
|
| 587 |
+
|
| 588 |
+
def __init__(self,
|
| 589 |
+
prob,
|
| 590 |
+
degree,
|
| 591 |
+
pad_val=0,
|
| 592 |
+
seg_pad_val=255,
|
| 593 |
+
center=None,
|
| 594 |
+
auto_bound=False):
|
| 595 |
+
self.prob = prob
|
| 596 |
+
assert prob >= 0 and prob <= 1
|
| 597 |
+
if isinstance(degree, (float, int)):
|
| 598 |
+
assert degree > 0, f'degree {degree} should be positive'
|
| 599 |
+
self.degree = (-degree, degree)
|
| 600 |
+
else:
|
| 601 |
+
self.degree = degree
|
| 602 |
+
assert len(self.degree) == 2, f'degree {self.degree} should be a ' \
|
| 603 |
+
f'tuple of (min, max)'
|
| 604 |
+
self.pal_val = pad_val
|
| 605 |
+
self.seg_pad_val = seg_pad_val
|
| 606 |
+
self.center = center
|
| 607 |
+
self.auto_bound = auto_bound
|
| 608 |
+
|
| 609 |
+
def __call__(self, results):
|
| 610 |
+
"""Call function to rotate image, semantic segmentation maps.
|
| 611 |
+
|
| 612 |
+
Args:
|
| 613 |
+
results (dict): Result dict from loading pipeline.
|
| 614 |
+
|
| 615 |
+
Returns:
|
| 616 |
+
dict: Rotated results.
|
| 617 |
+
"""
|
| 618 |
+
|
| 619 |
+
rotate = True if np.random.rand() < self.prob else False
|
| 620 |
+
degree = np.random.uniform(min(*self.degree), max(*self.degree))
|
| 621 |
+
if rotate:
|
| 622 |
+
# rotate image
|
| 623 |
+
results['img'] = mmcv.imrotate(
|
| 624 |
+
results['img'],
|
| 625 |
+
angle=degree,
|
| 626 |
+
border_value=self.pal_val,
|
| 627 |
+
center=self.center,
|
| 628 |
+
auto_bound=self.auto_bound)
|
| 629 |
+
|
| 630 |
+
# rotate segs
|
| 631 |
+
for key in results.get('seg_fields', []):
|
| 632 |
+
results[key] = mmcv.imrotate(
|
| 633 |
+
results[key],
|
| 634 |
+
angle=degree,
|
| 635 |
+
border_value=self.seg_pad_val,
|
| 636 |
+
center=self.center,
|
| 637 |
+
auto_bound=self.auto_bound,
|
| 638 |
+
interpolation='nearest')
|
| 639 |
+
return results
|
| 640 |
+
|
| 641 |
+
def __repr__(self):
|
| 642 |
+
repr_str = self.__class__.__name__
|
| 643 |
+
repr_str += f'(prob={self.prob}, ' \
|
| 644 |
+
f'degree={self.degree}, ' \
|
| 645 |
+
f'pad_val={self.pal_val}, ' \
|
| 646 |
+
f'seg_pad_val={self.seg_pad_val}, ' \
|
| 647 |
+
f'center={self.center}, ' \
|
| 648 |
+
f'auto_bound={self.auto_bound})'
|
| 649 |
+
return repr_str
|
| 650 |
+
|
| 651 |
+
|
| 652 |
+
@PIPELINES.register_module()
|
| 653 |
+
class RGB2Gray(object):
|
| 654 |
+
"""Convert RGB image to grayscale image.
|
| 655 |
+
|
| 656 |
+
This transform calculate the weighted mean of input image channels with
|
| 657 |
+
``weights`` and then expand the channels to ``out_channels``. When
|
| 658 |
+
``out_channels`` is None, the number of output channels is the same as
|
| 659 |
+
input channels.
|
| 660 |
+
|
| 661 |
+
Args:
|
| 662 |
+
out_channels (int): Expected number of output channels after
|
| 663 |
+
transforming. Default: None.
|
| 664 |
+
weights (tuple[float]): The weights to calculate the weighted mean.
|
| 665 |
+
Default: (0.299, 0.587, 0.114).
|
| 666 |
+
"""
|
| 667 |
+
|
| 668 |
+
def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)):
|
| 669 |
+
assert out_channels is None or out_channels > 0
|
| 670 |
+
self.out_channels = out_channels
|
| 671 |
+
assert isinstance(weights, tuple)
|
| 672 |
+
for item in weights:
|
| 673 |
+
assert isinstance(item, (float, int))
|
| 674 |
+
self.weights = weights
|
| 675 |
+
|
| 676 |
+
def __call__(self, results):
|
| 677 |
+
"""Call function to convert RGB image to grayscale image.
|
| 678 |
+
|
| 679 |
+
Args:
|
| 680 |
+
results (dict): Result dict from loading pipeline.
|
| 681 |
+
|
| 682 |
+
Returns:
|
| 683 |
+
dict: Result dict with grayscale image.
|
| 684 |
+
"""
|
| 685 |
+
img = results['img']
|
| 686 |
+
assert len(img.shape) == 3
|
| 687 |
+
assert img.shape[2] == len(self.weights)
|
| 688 |
+
weights = np.array(self.weights).reshape((1, 1, -1))
|
| 689 |
+
img = (img * weights).sum(2, keepdims=True)
|
| 690 |
+
if self.out_channels is None:
|
| 691 |
+
img = img.repeat(weights.shape[2], axis=2)
|
| 692 |
+
else:
|
| 693 |
+
img = img.repeat(self.out_channels, axis=2)
|
| 694 |
+
|
| 695 |
+
results['img'] = img
|
| 696 |
+
results['img_shape'] = img.shape
|
| 697 |
+
|
| 698 |
+
return results
|
| 699 |
+
|
| 700 |
+
def __repr__(self):
|
| 701 |
+
repr_str = self.__class__.__name__
|
| 702 |
+
repr_str += f'(out_channels={self.out_channels}, ' \
|
| 703 |
+
f'weights={self.weights})'
|
| 704 |
+
return repr_str
|
| 705 |
+
|
| 706 |
+
|
| 707 |
+
@PIPELINES.register_module()
|
| 708 |
+
class AdjustGamma(object):
|
| 709 |
+
"""Using gamma correction to process the image.
|
| 710 |
+
|
| 711 |
+
Args:
|
| 712 |
+
gamma (float or int): Gamma value used in gamma correction.
|
| 713 |
+
Default: 1.0.
|
| 714 |
+
"""
|
| 715 |
+
|
| 716 |
+
def __init__(self, gamma=1.0):
|
| 717 |
+
assert isinstance(gamma, float) or isinstance(gamma, int)
|
| 718 |
+
assert gamma > 0
|
| 719 |
+
self.gamma = gamma
|
| 720 |
+
inv_gamma = 1.0 / gamma
|
| 721 |
+
self.table = np.array([(i / 255.0)**inv_gamma * 255
|
| 722 |
+
for i in np.arange(256)]).astype('uint8')
|
| 723 |
+
|
| 724 |
+
def __call__(self, results):
|
| 725 |
+
"""Call function to process the image with gamma correction.
|
| 726 |
+
|
| 727 |
+
Args:
|
| 728 |
+
results (dict): Result dict from loading pipeline.
|
| 729 |
+
|
| 730 |
+
Returns:
|
| 731 |
+
dict: Processed results.
|
| 732 |
+
"""
|
| 733 |
+
|
| 734 |
+
results['img'] = mmcv.lut_transform(
|
| 735 |
+
np.array(results['img'], dtype=np.uint8), self.table)
|
| 736 |
+
|
| 737 |
+
return results
|
| 738 |
+
|
| 739 |
+
def __repr__(self):
|
| 740 |
+
return self.__class__.__name__ + f'(gamma={self.gamma})'
|
| 741 |
+
|
| 742 |
+
|
| 743 |
+
@PIPELINES.register_module()
|
| 744 |
+
class SegRescale(object):
|
| 745 |
+
"""Rescale semantic segmentation maps.
|
| 746 |
+
|
| 747 |
+
Args:
|
| 748 |
+
scale_factor (float): The scale factor of the final output.
|
| 749 |
+
"""
|
| 750 |
+
|
| 751 |
+
def __init__(self, scale_factor=1):
|
| 752 |
+
self.scale_factor = scale_factor
|
| 753 |
+
|
| 754 |
+
def __call__(self, results):
|
| 755 |
+
"""Call function to scale the semantic segmentation map.
|
| 756 |
+
|
| 757 |
+
Args:
|
| 758 |
+
results (dict): Result dict from loading pipeline.
|
| 759 |
+
|
| 760 |
+
Returns:
|
| 761 |
+
dict: Result dict with semantic segmentation map scaled.
|
| 762 |
+
"""
|
| 763 |
+
for key in results.get('seg_fields', []):
|
| 764 |
+
if self.scale_factor != 1:
|
| 765 |
+
results[key] = mmcv.imrescale(
|
| 766 |
+
results[key], self.scale_factor, interpolation='nearest')
|
| 767 |
+
return results
|
| 768 |
+
|
| 769 |
+
def __repr__(self):
|
| 770 |
+
return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
@PIPELINES.register_module()
|
| 774 |
+
class PhotoMetricDistortion(object):
|
| 775 |
+
"""Apply photometric distortion to image sequentially, every transformation
|
| 776 |
+
is applied with a probability of 0.5. The position of random contrast is in
|
| 777 |
+
second or second to last.
|
| 778 |
+
|
| 779 |
+
1. random brightness
|
| 780 |
+
2. random contrast (mode 0)
|
| 781 |
+
3. convert color from BGR to HSV
|
| 782 |
+
4. random saturation
|
| 783 |
+
5. random hue
|
| 784 |
+
6. convert color from HSV to BGR
|
| 785 |
+
7. random contrast (mode 1)
|
| 786 |
+
|
| 787 |
+
Args:
|
| 788 |
+
brightness_delta (int): delta of brightness.
|
| 789 |
+
contrast_range (tuple): range of contrast.
|
| 790 |
+
saturation_range (tuple): range of saturation.
|
| 791 |
+
hue_delta (int): delta of hue.
|
| 792 |
+
"""
|
| 793 |
+
|
| 794 |
+
def __init__(self,
|
| 795 |
+
brightness_delta=32,
|
| 796 |
+
contrast_range=(0.5, 1.5),
|
| 797 |
+
saturation_range=(0.5, 1.5),
|
| 798 |
+
hue_delta=18):
|
| 799 |
+
self.brightness_delta = brightness_delta
|
| 800 |
+
self.contrast_lower, self.contrast_upper = contrast_range
|
| 801 |
+
self.saturation_lower, self.saturation_upper = saturation_range
|
| 802 |
+
self.hue_delta = hue_delta
|
| 803 |
+
|
| 804 |
+
def convert(self, img, alpha=1, beta=0):
|
| 805 |
+
"""Multiple with alpha and add beat with clip."""
|
| 806 |
+
img = img.astype(np.float32) * alpha + beta
|
| 807 |
+
img = np.clip(img, 0, 255)
|
| 808 |
+
return img.astype(np.uint8)
|
| 809 |
+
|
| 810 |
+
def brightness(self, img):
|
| 811 |
+
"""Brightness distortion."""
|
| 812 |
+
if random.randint(2):
|
| 813 |
+
return self.convert(
|
| 814 |
+
img,
|
| 815 |
+
beta=random.uniform(-self.brightness_delta,
|
| 816 |
+
self.brightness_delta))
|
| 817 |
+
return img
|
| 818 |
+
|
| 819 |
+
def contrast(self, img):
|
| 820 |
+
"""Contrast distortion."""
|
| 821 |
+
if random.randint(2):
|
| 822 |
+
return self.convert(
|
| 823 |
+
img,
|
| 824 |
+
alpha=random.uniform(self.contrast_lower, self.contrast_upper))
|
| 825 |
+
return img
|
| 826 |
+
|
| 827 |
+
def saturation(self, img):
|
| 828 |
+
"""Saturation distortion."""
|
| 829 |
+
if random.randint(2):
|
| 830 |
+
img = mmcv.bgr2hsv(img)
|
| 831 |
+
img[:, :, 1] = self.convert(
|
| 832 |
+
img[:, :, 1],
|
| 833 |
+
alpha=random.uniform(self.saturation_lower,
|
| 834 |
+
self.saturation_upper))
|
| 835 |
+
img = mmcv.hsv2bgr(img)
|
| 836 |
+
return img
|
| 837 |
+
|
| 838 |
+
def hue(self, img):
|
| 839 |
+
"""Hue distortion."""
|
| 840 |
+
if random.randint(2):
|
| 841 |
+
img = mmcv.bgr2hsv(img)
|
| 842 |
+
img[:, :,
|
| 843 |
+
0] = (img[:, :, 0].astype(int) +
|
| 844 |
+
random.randint(-self.hue_delta, self.hue_delta)) % 180
|
| 845 |
+
img = mmcv.hsv2bgr(img)
|
| 846 |
+
return img
|
| 847 |
+
|
| 848 |
+
def __call__(self, results):
|
| 849 |
+
"""Call function to perform photometric distortion on images.
|
| 850 |
+
|
| 851 |
+
Args:
|
| 852 |
+
results (dict): Result dict from loading pipeline.
|
| 853 |
+
|
| 854 |
+
Returns:
|
| 855 |
+
dict: Result dict with images distorted.
|
| 856 |
+
"""
|
| 857 |
+
|
| 858 |
+
img = results['img']
|
| 859 |
+
# random brightness
|
| 860 |
+
img = self.brightness(img)
|
| 861 |
+
|
| 862 |
+
# mode == 0 --> do random contrast first
|
| 863 |
+
# mode == 1 --> do random contrast last
|
| 864 |
+
mode = random.randint(2)
|
| 865 |
+
if mode == 1:
|
| 866 |
+
img = self.contrast(img)
|
| 867 |
+
|
| 868 |
+
# random saturation
|
| 869 |
+
img = self.saturation(img)
|
| 870 |
+
|
| 871 |
+
# random hue
|
| 872 |
+
img = self.hue(img)
|
| 873 |
+
|
| 874 |
+
# random contrast
|
| 875 |
+
if mode == 0:
|
| 876 |
+
img = self.contrast(img)
|
| 877 |
+
|
| 878 |
+
results['img'] = img
|
| 879 |
+
return results
|
| 880 |
+
|
| 881 |
+
def __repr__(self):
|
| 882 |
+
repr_str = self.__class__.__name__
|
| 883 |
+
repr_str += (f'(brightness_delta={self.brightness_delta}, '
|
| 884 |
+
f'contrast_range=({self.contrast_lower}, '
|
| 885 |
+
f'{self.contrast_upper}), '
|
| 886 |
+
f'saturation_range=({self.saturation_lower}, '
|
| 887 |
+
f'{self.saturation_upper}), '
|
| 888 |
+
f'hue_delta={self.hue_delta})')
|
| 889 |
+
return repr_str
|
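The transforms above are all registered in the PIPELINES registry, so in practice they are chained from config dicts rather than instantiated by hand. Below is a minimal sketch of that usage, assuming this repo's vendored import path and typical ImageNet normalization constants; the toy input values are hypothetical.

import numpy as np

# Compose lives in this vendored mmseg package; importing the pipelines
# package also registers the transforms defined above.
from annotator.uniformer.mmseg.datasets.pipelines import Compose

# Hypothetical pipeline: pad to a multiple of 32, then normalize.
pipeline = Compose([
    dict(type='Pad', size_divisor=32, pad_val=0, seg_pad_val=255),
    dict(type='Normalize',
         mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375],
         to_rgb=True),
])

# A toy `results` dict shaped like the output of the loading pipeline.
results = dict(
    img=np.random.randint(0, 256, (500, 375, 3), dtype=np.uint8),
    seg_fields=[],
)
results = pipeline(results)
print(results['pad_shape'])     # (512, 384, 3): each spatial dim rounded up to a multiple of 32
print(results['img_norm_cfg'])  # records mean/std/to_rgb for later un-normalization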
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/stare.py
ADDED
@@ -0,0 +1,27 @@
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class STAREDataset(CustomDataset):
    """STARE dataset.

    In segmentation map annotation for STARE, 0 stands for background, which is
    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.ah.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super(STAREDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='.ah.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
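As the docstring notes, the suffix pair means each image is matched with its annotation purely by filename convention; a one-liner illustrating the mapping (the filename is hypothetical):

# 'im0001.png' (image) pairs with 'im0001.ah.png' (segmentation map)
img_name = 'im0001.png'
seg_map = img_name[:-len('.png')] + '.ah.png'
print(seg_map)  # im0001.ah.png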
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/datasets/voc.py
ADDED
@@ -0,0 +1,29 @@
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class PascalVOCDataset(CustomDataset):
    """Pascal VOC dataset.

    Args:
        split (str): Split txt file for Pascal VOC.
    """

    CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
               'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
               'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
               'train', 'tvmonitor')

    PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
               [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
               [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
               [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
               [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]

    def __init__(self, split, **kwargs):
        super(PascalVOCDataset, self).__init__(
            img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs)
        assert osp.exists(self.img_dir) and self.split is not None
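Both dataset classes follow the same pattern: subclass CustomDataset, fix the suffixes, and register with DATASETS so the class can be built from a config dict. A minimal sketch of that construction path, assuming the standard mmseg build_dataset entry point is exported by this vendored package; the data paths are hypothetical and must exist on disk for the assert in __init__ to pass.

from annotator.uniformer.mmseg.datasets import build_dataset

dataset = build_dataset(dict(
    type='PascalVOCDataset',
    data_root='data/VOCdevkit/VOC2012',        # hypothetical local path
    img_dir='JPEGImages',
    ann_dir='SegmentationClass',
    split='ImageSets/Segmentation/train.txt',
    pipeline=[]))                              # no-op pipeline, for illustration only
print(len(dataset), dataset.CLASSES[:3])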
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/models/necks/__init__.py
ADDED
@@ -0,0 +1,4 @@
from .fpn import FPN
from .multilevel_neck import MultiLevelNeck

__all__ = ['FPN', 'MultiLevelNeck']
CCEdit-main/src/controlnet11/annotator/uniformer/mmseg/utils/collect_env.py
ADDED
@@ -0,0 +1,17 @@
from annotator.uniformer.mmcv.utils import collect_env as collect_base_env
from annotator.uniformer.mmcv.utils import get_git_hash

import annotator.uniformer.mmseg as mmseg


def collect_env():
    """Collect the information of the running environments."""
    env_info = collect_base_env()
    env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'

    return env_info


if __name__ == '__main__':
    for name, val in collect_env().items():
        print('{}: {}'.format(name, val))
CCEdit-main/src/controlnet11/cldm/cldm.py
ADDED
@@ -0,0 +1,438 @@
import einops
import torch
import torch as th
import torch.nn as nn

from ldm.modules.diffusionmodules.util import (
    conv_nd,
    linear,
    zero_module,
    timestep_embedding,
)

from einops import rearrange, repeat
from torchvision.utils import make_grid
from ldm.modules.attention import SpatialTransformer
from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import log_txt_as_img, exists, instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler


class ControlledUnetModel(UNetModel):
    def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs):
        hs = []
        with torch.no_grad():
            t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
            emb = self.time_embed(t_emb)
            h = x.type(self.dtype)
            for module in self.input_blocks:
                h = module(h, emb, context)
                hs.append(h)
            h = self.middle_block(h, emb, context)

        if control is not None:
            h += control.pop()

        for i, module in enumerate(self.output_blocks):
            if only_mid_control or control is None:
                h = torch.cat([h, hs.pop()], dim=1)
            else:
                h = torch.cat([h, hs.pop() + control.pop()], dim=1)
            h = module(h, emb, context)

        h = h.type(x.dtype)
        return self.out(h)


class ControlNet(nn.Module):
    def __init__(
            self,
            image_size,
            in_channels,
            model_channels,
            hint_channels,
            num_res_blocks,
            attention_resolutions,
            dropout=0,
            channel_mult=(1, 2, 4, 8),
            conv_resample=True,
            dims=2,
            use_checkpoint=False,
            use_fp16=False,
            num_heads=-1,
            num_head_channels=-1,
            num_heads_upsample=-1,
            use_scale_shift_norm=False,
            resblock_updown=False,
            use_new_attention_order=False,
            use_spatial_transformer=False,  # custom transformer support
            transformer_depth=1,  # custom transformer support
            context_dim=None,  # custom transformer support
            n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
            legacy=True,
            disable_self_attentions=None,
            num_attention_blocks=None,
            disable_middle_self_attn=False,
            use_linear_in_transformer=False,
    ):
        super().__init__()
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.dims = dims
        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError("provide num_res_blocks either as an int (globally constant) or "
                                 "as a list/tuple (per-level) with the same length as channel_mult")
            self.num_res_blocks = num_res_blocks
        if disable_self_attentions is not None:
            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
            assert len(disable_self_attentions) == len(channel_mult)
        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
            print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                  f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
                  f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
                  f"attention will still not be set.")

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)])

        self.input_hint_block = TimestepEmbedSequential(
            conv_nd(dims, hint_channels, 16, 3, padding=1),
            nn.SiLU(),
            conv_nd(dims, 16, 16, 3, padding=1),
            nn.SiLU(),
            conv_nd(dims, 16, 32, 3, padding=1, stride=2),
            nn.SiLU(),
            conv_nd(dims, 32, 32, 3, padding=1),
            nn.SiLU(),
            conv_nd(dims, 32, 96, 3, padding=1, stride=2),
            nn.SiLU(),
            conv_nd(dims, 96, 96, 3, padding=1),
            nn.SiLU(),
            conv_nd(dims, 96, 256, 3, padding=1, stride=2),
            nn.SiLU(),
            zero_module(conv_nd(dims, 256, model_channels, 3, padding=1))
        )

        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        # num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            ) if not use_spatial_transformer else SpatialTransformer(
                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint
                            )
                        )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self.zero_convs.append(self.make_zero_conv(ch))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                self.zero_convs.append(self.make_zero_conv(ch))
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            # num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                use_checkpoint=use_checkpoint
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self.middle_block_out = self.make_zero_conv(ch)
        self._feature_size += ch

    def make_zero_conv(self, channels):
        return TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0)))

    def forward(self, x, hint, timesteps, context, **kwargs):
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        guided_hint = self.input_hint_block(hint, emb, context)

        outs = []

        h = x.type(self.dtype)
        for module, zero_conv in zip(self.input_blocks, self.zero_convs):
            if guided_hint is not None:
                h = module(h, emb, context)
                h += guided_hint
                guided_hint = None
            else:
                h = module(h, emb, context)
            outs.append(zero_conv(h, emb, context))

        h = self.middle_block(h, emb, context)
        outs.append(self.middle_block_out(h, emb, context))

        return outs


class ControlLDM(LatentDiffusion):

    def __init__(self, control_stage_config, control_key, only_mid_control, global_average_pooling=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.control_model = instantiate_from_config(control_stage_config)
        self.control_key = control_key
        self.only_mid_control = only_mid_control
        self.control_scales = [1.0] * 13
        self.global_average_pooling = global_average_pooling

    @torch.no_grad()
    def get_input(self, batch, k, bs=None, *args, **kwargs):
        x, c = super().get_input(batch, self.first_stage_key, *args, **kwargs)
        control = batch[self.control_key]
        if bs is not None:
            control = control[:bs]
        control = control.to(self.device)
        control = einops.rearrange(control, 'b h w c -> b c h w')
        control = control.to(memory_format=torch.contiguous_format).float()
        return x, dict(c_crossattn=[c], c_concat=[control])

    def apply_model(self, x_noisy, t, cond, *args, **kwargs):
        assert isinstance(cond, dict)
        diffusion_model = self.model.diffusion_model

        cond_txt = torch.cat(cond['c_crossattn'], 1)

        if cond['c_concat'] is None:
            eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=None, only_mid_control=self.only_mid_control)
        else:
            control = self.control_model(x=x_noisy, hint=torch.cat(cond['c_concat'], 1), timesteps=t, context=cond_txt)
            control = [c * scale for c, scale in zip(control, self.control_scales)]
            if self.global_average_pooling:
                control = [torch.mean(c, dim=(2, 3), keepdim=True) for c in control]
            eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=control, only_mid_control=self.only_mid_control)

        return eps

    @torch.no_grad()
    def get_unconditional_conditioning(self, N):
        return self.get_learned_conditioning([""] * N)

    @torch.no_grad()
    def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None,
                   use_ema_scope=True,
                   **kwargs):
        use_ddim = ddim_steps is not None

        log = dict()
        z, c = self.get_input(batch, self.first_stage_key, bs=N)
        c_cat, c = c["c_concat"][0][:N], c["c_crossattn"][0][:N]
        N = min(z.shape[0], N)
        n_row = min(z.shape[0], n_row)
        log["reconstruction"] = self.decode_first_stage(z)
        log["control"] = c_cat * 2.0 - 1.0
        log["conditioning"] = log_txt_as_img((512, 512), batch[self.cond_stage_key], size=16)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
                                                     batch_size=N, ddim=use_ddim,
                                                     ddim_steps=ddim_steps, eta=ddim_eta)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

        if unconditional_guidance_scale > 1.0:
            uc_cross = self.get_unconditional_conditioning(N)
            uc_cat = c_cat  # torch.zeros_like(c_cat)
            uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
            samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
                                             batch_size=N, ddim=use_ddim,
                                             ddim_steps=ddim_steps, eta=ddim_eta,
                                             unconditional_guidance_scale=unconditional_guidance_scale,
                                             unconditional_conditioning=uc_full,
                                             )
            x_samples_cfg = self.decode_first_stage(samples_cfg)
            log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg

        return log

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        ddim_sampler = DDIMSampler(self)
        b, c, h, w = cond["c_concat"][0].shape
        shape = (self.channels, h // 8, w // 8)
        samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs)
        return samples, intermediates

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.control_model.parameters())
        if not self.sd_locked:
            params += list(self.model.diffusion_model.output_blocks.parameters())
            params += list(self.model.diffusion_model.out.parameters())
        opt = torch.optim.AdamW(params, lr=lr)
        return opt

    def low_vram_shift(self, is_diffusing):
        if is_diffusing:
            self.model = self.model.cuda()
            self.control_model = self.control_model.cuda()
            self.first_stage_model = self.first_stage_model.cpu()
            self.cond_stage_model = self.cond_stage_model.cpu()
        else:
            self.model = self.model.cpu()
            self.control_model = self.control_model.cpu()
            self.first_stage_model = self.first_stage_model.cuda()
            self.cond_stage_model = self.cond_stage_model.cuda()
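The make_zero_conv / zero_module pattern above is the central ControlNet mechanism: every control pathway joins the frozen U-Net through a 1x1 convolution initialized to zero, so at the start of training the combined model behaves exactly like the original, while the conv still receives nonzero gradients and can learn. A self-contained sketch of just that mechanism; the zero_module helper here is re-implemented for illustration, mirroring the one imported from ldm.modules.diffusionmodules.util.

import torch
import torch.nn as nn


def zero_module(module: nn.Module) -> nn.Module:
    """Zero out a module's parameters in place (mirrors ldm's helper)."""
    for p in module.parameters():
        p.detach().zero_()
    return module


# A 1x1 "zero conv": its output is exactly zero at initialization, so adding
# it onto a frozen feature map is a no-op at step 0...
zero_conv = zero_module(nn.Conv2d(320, 320, kernel_size=1, padding=0))
feat = torch.randn(1, 320, 64, 64)
assert torch.all(zero_conv(feat) == 0)

# ...yet its weights still get nonzero gradients, so the branch can learn.
zero_conv(feat).sum().backward()
assert zero_conv.weight.grad.abs().sum() > 0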
CCEdit-main/src/controlnet11/cldm/ddim_hacked.py
ADDED
@@ -0,0 +1,317 @@
"""SAMPLING ONLY."""

import torch
import numpy as np
from tqdm import tqdm

from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor


class DDIMSampler(object):
    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        if type(attr) == torch.Tensor:
            if attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                    1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,  # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               dynamic_threshold=None,
               ucg_schedule=None,
               **kwargs
               ):
        if conditioning is not None:
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list): ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

            elif isinstance(conditioning, list):
                for ctmp in conditioning:
                    if ctmp.shape[0] != batch_size:
                        # note: the upstream code printed the undefined name `cbs` here,
                        # which would raise a NameError; use the actual shape instead
                        print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")

            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')

        samples, intermediates = self.ddim_sampling(conditioning, size,
                                                    callback=callback,
                                                    img_callback=img_callback,
                                                    quantize_denoised=quantize_x0,
                                                    mask=mask, x0=x0,
                                                    ddim_use_original_steps=False,
                                                    noise_dropout=noise_dropout,
                                                    temperature=temperature,
                                                    score_corrector=score_corrector,
                                                    corrector_kwargs=corrector_kwargs,
                                                    x_T=x_T,
                                                    log_every_t=log_every_t,
                                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                                    unconditional_conditioning=unconditional_conditioning,
                                                    dynamic_threshold=dynamic_threshold,
                                                    ucg_schedule=ucg_schedule
                                                    )
        return samples, intermediates

    @torch.no_grad()
    def ddim_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
                      ucg_schedule=None):
        device = self.model.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]

        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)

            if mask is not None:
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if ucg_schedule is not None:
                assert len(ucg_schedule) == len(time_range)
                unconditional_guidance_scale = ucg_schedule[i]

            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0 = outs
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)

            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)

        return img, intermediates

    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None,
                      dynamic_threshold=None):
        b, *_, device = *x.shape, x.device

        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
            model_output = self.model.apply_model(x, t, c)
        else:
            model_t = self.model.apply_model(x, t, c)
            model_uncond = self.model.apply_model(x, t, unconditional_conditioning)
            model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)

        if self.model.parameterization == "v":
            e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
        else:
            e_t = model_output

        if score_corrector is not None:
            assert self.model.parameterization == "eps", 'not implemented'
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        # select parameters corresponding to the currently considered timestep
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)

        # current prediction for x_0
        if self.model.parameterization != "v":
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        else:
            pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)

        if quantize_denoised:
            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)

        if dynamic_threshold is not None:
            raise NotImplementedError()

        # direction pointing to x_t
        dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0

    @torch.no_grad()
    def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
               unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
        timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
        num_reference_steps = timesteps.shape[0]

        assert t_enc <= num_reference_steps
        num_steps = t_enc

        if use_original_steps:
            alphas_next = self.alphas_cumprod[:num_steps]
            alphas = self.alphas_cumprod_prev[:num_steps]
        else:
            alphas_next = self.ddim_alphas[:num_steps]
            alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])

        x_next = x0
        intermediates = []
        inter_steps = []
        for i in tqdm(range(num_steps), desc='Encoding Image'):
            t = torch.full((x0.shape[0],), timesteps[i], device=self.model.device, dtype=torch.long)
            if unconditional_guidance_scale == 1.:
                noise_pred = self.model.apply_model(x_next, t, c)
            else:
                assert unconditional_conditioning is not None
                e_t_uncond, noise_pred = torch.chunk(
                    self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
                                           torch.cat((unconditional_conditioning, c))), 2)
                noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)

            xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
|
| 264 |
+
weighted_noise_pred = alphas_next[i].sqrt() * (
|
| 265 |
+
(1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
|
| 266 |
+
x_next = xt_weighted + weighted_noise_pred
|
| 267 |
+
if return_intermediates and i % (
|
| 268 |
+
num_steps // return_intermediates) == 0 and i < num_steps - 1:
|
| 269 |
+
intermediates.append(x_next)
|
| 270 |
+
inter_steps.append(i)
|
| 271 |
+
elif return_intermediates and i >= num_steps - 2:
|
| 272 |
+
intermediates.append(x_next)
|
| 273 |
+
inter_steps.append(i)
|
| 274 |
+
if callback: callback(i)
|
| 275 |
+
|
| 276 |
+
out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
|
| 277 |
+
if return_intermediates:
|
| 278 |
+
out.update({'intermediates': intermediates})
|
| 279 |
+
return x_next, out
|
| 280 |
+
|
| 281 |
+
@torch.no_grad()
|
| 282 |
+
def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
|
| 283 |
+
# fast, but does not allow for exact reconstruction
|
| 284 |
+
# t serves as an index to gather the correct alphas
|
| 285 |
+
if use_original_steps:
|
| 286 |
+
sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
|
| 287 |
+
sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
|
| 288 |
+
else:
|
| 289 |
+
sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
|
| 290 |
+
sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
|
| 291 |
+
|
| 292 |
+
if noise is None:
|
| 293 |
+
noise = torch.randn_like(x0)
|
| 294 |
+
return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
|
| 295 |
+
extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
|
| 296 |
+
|
| 297 |
+
@torch.no_grad()
|
| 298 |
+
def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
|
| 299 |
+
use_original_steps=False, callback=None):
|
| 300 |
+
|
| 301 |
+
timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
|
| 302 |
+
timesteps = timesteps[:t_start]
|
| 303 |
+
|
| 304 |
+
time_range = np.flip(timesteps)
|
| 305 |
+
total_steps = timesteps.shape[0]
|
| 306 |
+
print(f"Running DDIM Sampling with {total_steps} timesteps")
|
| 307 |
+
|
| 308 |
+
iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
|
| 309 |
+
x_dec = x_latent
|
| 310 |
+
for i, step in enumerate(iterator):
|
| 311 |
+
index = total_steps - i - 1
|
| 312 |
+
ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
|
| 313 |
+
x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
|
| 314 |
+
unconditional_guidance_scale=unconditional_guidance_scale,
|
| 315 |
+
unconditional_conditioning=unconditional_conditioning)
|
| 316 |
+
if callback: callback(i)
|
| 317 |
+
return x_dec
|
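For reference, `p_sample_ddim` above is a direct transcription of the DDIM update (Song et al., 2021). In the code's notation, with a_t = `alphas[index]`, a_{t-1} = `alphas_prev[index]` and sigma_t = `sigmas[index]`:

$$
\hat{x}_0 = \frac{x_t - \sqrt{1 - a_t}\,\epsilon_\theta(x_t, t)}{\sqrt{a_t}},
\qquad
x_{t-1} = \sqrt{a_{t-1}}\,\hat{x}_0 + \sqrt{1 - a_{t-1} - \sigma_t^2}\,\epsilon_\theta(x_t, t) + \sigma_t z,
\quad z \sim \mathcal{N}(0, I)
$$

These three terms are exactly `pred_x0`, `dir_xt`, and `noise` in the code; `ddim_eta=0` gives sigma_t = 0 and hence fully deterministic sampling.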
CCEdit-main/src/controlnet11/cldm/hack.py
ADDED
@@ -0,0 +1,111 @@
import torch
import einops

import ldm.modules.encoders.modules
import ldm.modules.attention

from transformers import logging
from ldm.modules.attention import default


def disable_verbosity():
    logging.set_verbosity_error()
    print('logging improved.')
    return


def enable_sliced_attention():
    ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward
    print('Enabled sliced_attention.')
    return


def hack_everything(clip_skip=0):
    disable_verbosity()
    ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward
    ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip
    print('Enabled clip hacks.')
    return


# Written by Lvmin
def _hacked_clip_forward(self, text):
    PAD = self.tokenizer.pad_token_id
    EOS = self.tokenizer.eos_token_id
    BOS = self.tokenizer.bos_token_id

    def tokenize(t):
        return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"]

    def transformer_encode(t):
        if self.clip_skip > 1:
            rt = self.transformer(input_ids=t, output_hidden_states=True)
            return self.transformer.text_model.final_layer_norm(rt.hidden_states[-self.clip_skip])
        else:
            return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state

    def split(x):
        return x[75 * 0: 75 * 1], x[75 * 1: 75 * 2], x[75 * 2: 75 * 3]

    def pad(x, p, i):
        return x[:i] if len(x) >= i else x + [p] * (i - len(x))

    raw_tokens_list = tokenize(text)
    tokens_list = []

    for raw_tokens in raw_tokens_list:
        raw_tokens_123 = split(raw_tokens)
        raw_tokens_123 = [[BOS] + raw_tokens_i + [EOS] for raw_tokens_i in raw_tokens_123]
        raw_tokens_123 = [pad(raw_tokens_i, PAD, 77) for raw_tokens_i in raw_tokens_123]
        tokens_list.append(raw_tokens_123)

    tokens_list = torch.IntTensor(tokens_list).to(self.device)

    feed = einops.rearrange(tokens_list, 'b f i -> (b f) i')
    y = transformer_encode(feed)
    z = einops.rearrange(y, '(b f) i c -> b (f i) c', f=3)

    return z


# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py
def _hacked_sliced_attentin_forward(self, x, context=None, mask=None):
    h = self.heads

    q = self.to_q(x)
    context = default(context, x)
    k = self.to_k(context)
    v = self.to_v(context)
    del context, x

    q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

    limit = k.shape[0]
    att_step = 1
    q_chunks = list(torch.tensor_split(q, limit // att_step, dim=0))
    k_chunks = list(torch.tensor_split(k, limit // att_step, dim=0))
    v_chunks = list(torch.tensor_split(v, limit // att_step, dim=0))

    q_chunks.reverse()
    k_chunks.reverse()
    v_chunks.reverse()
    sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
    del k, q, v
    for i in range(0, limit, att_step):
        q_buffer = q_chunks.pop()
        k_buffer = k_chunks.pop()
        v_buffer = v_chunks.pop()
        sim_buffer = torch.einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale

        del k_buffer, q_buffer
        # attention, what we cannot get enough of, by chunks

        sim_buffer = sim_buffer.softmax(dim=-1)

        sim_buffer = torch.einsum('b i j, b j d -> b i d', sim_buffer, v_buffer)
        del v_buffer
        sim[i:i + att_step, :, :] = sim_buffer

        del sim_buffer
    sim = einops.rearrange(sim, '(b h) n d -> b n (h d)', h=h)
    return self.to_out(sim)
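A minimal usage sketch (illustrative, not part of the diff): since these helpers monkey-patch the `ldm` classes, they must be called before the model is instantiated.

```python
# Assumed workflow: patch first, then build the model.
from cldm.hack import disable_verbosity, enable_sliced_attention, hack_everything

disable_verbosity()           # silence transformers' logging
enable_sliced_attention()     # chunked CrossAttention.forward for low-VRAM GPUs
hack_everything(clip_skip=2)  # optional: use the second-to-last CLIP hidden state
```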
CCEdit-main/src/controlnet11/cldm/logger.py
ADDED
@@ -0,0 +1,76 @@
import os

import numpy as np
import torch
import torchvision
from PIL import Image
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities.distributed import rank_zero_only


class ImageLogger(Callback):
    def __init__(self, batch_frequency=2000, max_images=4, clamp=True, increase_log_steps=True,
                 rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,
                 log_images_kwargs=None):
        super().__init__()
        self.rescale = rescale
        self.batch_freq = batch_frequency
        self.max_images = max_images
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
        self.clamp = clamp
        self.disabled = disabled
        self.log_on_batch_idx = log_on_batch_idx
        self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
        self.log_first_step = log_first_step

    @rank_zero_only
    def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx):
        root = os.path.join(save_dir, "image_log", split)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=4)
            if self.rescale:
                grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
            grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
            grid = grid.numpy()
            grid = (grid * 255).astype(np.uint8)
            filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(k, global_step, current_epoch, batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            Image.fromarray(grid).save(path)

    def log_img(self, pl_module, batch, batch_idx, split="train"):
        check_idx = batch_idx  # if self.log_on_batch_idx else pl_module.global_step
        if (self.check_frequency(check_idx) and  # batch_idx % self.batch_freq == 0
                hasattr(pl_module, "log_images") and
                callable(pl_module.log_images) and
                self.max_images > 0):
            logger = type(pl_module.logger)

            is_train = pl_module.training
            if is_train:
                pl_module.eval()

            with torch.no_grad():
                images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)

            for k in images:
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    if self.clamp:
                        images[k] = torch.clamp(images[k], -1., 1.)

            self.log_local(pl_module.logger.save_dir, split, images,
                           pl_module.global_step, pl_module.current_epoch, batch_idx)

            if is_train:
                pl_module.train()

    def check_frequency(self, check_idx):
        return check_idx % self.batch_freq == 0

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if not self.disabled:
            self.log_img(pl_module, batch, batch_idx, split="train")
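A sketch of how this callback is typically attached to a PyTorch Lightning 1.x trainer (the batch frequency and trainer flags below are illustrative):

```python
import pytorch_lightning as pl
from cldm.logger import ImageLogger

# Dumps image grids to <save_dir>/image_log/train every 300 batches.
image_logger = ImageLogger(batch_frequency=300)
trainer = pl.Trainer(gpus=1, precision=32, callbacks=[image_logger])
# trainer.fit(model, dataloader)
```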
CCEdit-main/src/controlnet11/cldm/model.py
ADDED
@@ -0,0 +1,28 @@
import os
import torch

from omegaconf import OmegaConf
from ldm.util import instantiate_from_config


def get_state_dict(d):
    return d.get('state_dict', d)


def load_state_dict(ckpt_path, location='cpu'):
    _, extension = os.path.splitext(ckpt_path)
    if extension.lower() == ".safetensors":
        import safetensors.torch
        state_dict = safetensors.torch.load_file(ckpt_path, device=location)
    else:
        state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))
    state_dict = get_state_dict(state_dict)
    print(f'Loaded state_dict from [{ckpt_path}]')
    return state_dict


def create_model(config_path):
    config = OmegaConf.load(config_path)
    model = instantiate_from_config(config.model).cpu()
    print(f'Loaded model config from [{config_path}]')
    return model
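Typical usage of the two helpers (the config and checkpoint paths below are placeholders):

```python
from cldm.model import create_model, load_state_dict

# Build the architecture on CPU from an OmegaConf config, then load weights.
model = create_model('./models/cldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict('./models/control_sd15_canny.pth', location='cpu'))
model = model.cuda()
```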
CCEdit-main/src/controlnet11/github_docs/annotator.md
ADDED
@@ -0,0 +1,95 @@
# Automatic Annotations

We provide gradio examples to obtain annotations that are aligned to our pretrained production-ready models.

Just run

    python gradio_annotator.py

Since everyone organizes their datasets differently, we do not hard-code any scripts for batch processing. But "gradio_annotator.py" is written in a very readable way, and modifying it to annotate your own images should be easy.
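For instance, a batch loop could call one of the detectors directly; a minimal sketch, assuming the `annotator.canny` package from this repo and placeholder input/output folders:

```python
import os
import cv2
from annotator.canny import CannyDetector  # assumed import path within this repo

apply_canny = CannyDetector()
src, dst = 'my_images', 'my_annotations'   # placeholder folders
os.makedirs(dst, exist_ok=True)
for name in os.listdir(src):
    img = cv2.imread(os.path.join(src, name))
    edge = apply_canny(img, 100, 200)      # low/high Canny thresholds
    cv2.imwrite(os.path.join(dst, name), edge)
```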
The gradio UI of "gradio_annotator.py" provides the following interfaces:

# Annotators

### Canny Edge



### Soft Edge



# MLSD

Straight-line detection.



# Depth

The quality of Midas is not as good as Zoe's. Midas is faster and less "tricky" - Zoe uses some post-processing.



# Normal

[Bae's method](https://github.com/baegwangbin/surface_normal_uncertainty) to detect normal maps.

This method is significantly better than ControlNet 1.0's Normal-from-Midas.

Note that ControlNet 1.0's Normal-from-Midas is abandoned: we have moved to this annotator and will no longer maintain the previous Normal-from-Midas.



# Openpose

The ControlNet team made significant efforts to improve this. The differences are:

(1) We fixed many bugs in the previous implementation - this implementation uses "correct" sampling methods and has better accuracy, especially for hands.

(2) Face landmarks are added.



Multiple people in the wild:



# Lineart (Anime)

This is Mukosame/Anime2Sketch.



# Lineart (Realistic)

This is awacke1/Image-to-Line-Drawings.

It turns real photos into line art, with two modes: a detailed mode and a coarse mode.

Detailed mode:



Coarse mode:



# Semantic Segmentation

Below is the previous Uniformer from ControlNet 1.0. This model is out-of-date and will be replaced by OneFormer. Nevertheless, Uniformer is still a valid model and can perhaps be used as data augmentation in training.



This is OneFormer. This method is SOTA. The ControlNet team made significant efforts to remove unnecessary dependencies so that anyone can use this model in nearly any environment without worrying about difficult dependencies.



# Content Reshuffle

Use a simple random flow to reshuffle the content of an image. This can be used to train style-based ControlNets without being limited to the model scope of the CLIP-Vision tokenizer.

This method was first proposed in [Two-stage Sketch Colorization](https://www.cse.cuhk.edu.hk/~ttwong/papers/colorize/colorize.html) to guide the style of image-to-image translation.
CCEdit-main/src/controlnet11/ldm/data/__init__.py
ADDED
File without changes
CCEdit-main/src/controlnet11/ldm/data/util.py
ADDED
@@ -0,0 +1,24 @@
import torch

from ldm.modules.midas.api import load_midas_transform


class AddMiDaS(object):
    def __init__(self, model_type):
        super().__init__()
        self.transform = load_midas_transform(model_type)

    def pt2np(self, x):
        x = ((x + 1.0) * .5).detach().cpu().numpy()
        return x

    def np2pt(self, x):
        x = torch.from_numpy(x) * 2 - 1.
        return x

    def __call__(self, sample):
        # sample['jpg'] is tensor hwc in [-1, 1] at this point
        x = self.pt2np(sample['jpg'])
        x = self.transform({"image": x})["image"]
        sample['midas_in'] = x
        return sample
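A sketch of how this transform is applied to a data sample (the `model_type` string follows `ldm.modules.midas.api`; "dpt_hybrid" is an assumed variant name):

```python
import torch
from ldm.data.util import AddMiDaS

add_midas = AddMiDaS(model_type="dpt_hybrid")      # assumed MiDaS variant name
sample = {"jpg": torch.rand(384, 384, 3) * 2 - 1}  # HWC tensor in [-1, 1]
sample = add_midas(sample)                         # adds the 'midas_in' entry
print(sample["midas_in"].shape)
```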
CCEdit-main/src/controlnet11/ldm/models/autoencoder.py
ADDED
@@ -0,0 +1,219 @@
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager

from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution

from ldm.util import instantiate_from_config
from ldm.modules.ema import LitEma


class AutoencoderKL(pl.LightningModule):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 ema_decay=None,
                 learn_logvar=False
                 ):
        super().__init__()
        self.learn_logvar = learn_logvar
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

        self.use_ema = ema_decay is not None
        if self.use_ema:
            self.ema_decay = ema_decay
            assert 0. < ema_decay < 1.
            self.model_ema = LitEma(self, decay=ema_decay)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=list()):
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z):
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return dec, posterior

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)

        if optimizer_idx == 0:
            # train encoder+decoder+logvar
            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return aeloss

        if optimizer_idx == 1:
            # train the discriminator
            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")

            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, postfix=""):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val"+postfix)

        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val"+postfix)

        self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        lr = self.learning_rate
        ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
            self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
        if self.learn_logvar:
            print(f"{self.__class__.__name__}: Learning logvar")
            ae_params_list.append(self.loss.logvar)
        opt_ae = torch.optim.Adam(ae_params_list,
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        return self.decoder.conv_out.weight

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if not only_inputs:
            xrec, posterior = self(x)
            if x.shape[1] > 3:
                # colorize with random projection
                assert xrec.shape[1] > 3
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
            log["reconstructions"] = xrec
            if log_ema or self.use_ema:
                with self.ema_scope():
                    xrec_ema, posterior_ema = self(x)
                    if x.shape[1] > 3:
                        # colorize with random projection
                        assert xrec_ema.shape[1] > 3
                        xrec_ema = self.to_rgb(xrec_ema)
                    log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
                    log["reconstructions_ema"] = xrec_ema
        log["inputs"] = x
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x


class IdentityFirstStage(torch.nn.Module):
    def __init__(self, *args, vq_interface=False, **kwargs):
        self.vq_interface = vq_interface
        super().__init__()

    def encode(self, x, *args, **kwargs):
        return x

    def decode(self, x, *args, **kwargs):
        return x

    def quantize(self, x, *args, **kwargs):
        if self.vq_interface:
            return x, None, [None, None, None]
        return x

    def forward(self, x, *args, **kwargs):
        return x
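A round-trip sketch, assuming `first_stage_config` is an OmegaConf node with the usual ddconfig/lossconfig/embed_dim fields and a pretrained `ckpt_path`:

```python
import torch
from ldm.util import instantiate_from_config

model = instantiate_from_config(first_stage_config).eval()  # AutoencoderKL
x = torch.randn(1, 3, 256, 256)                             # image batch in [-1, 1]
with torch.no_grad():
    posterior = model.encode(x)   # DiagonalGaussianDistribution over latents
    z = posterior.sample()        # e.g. 1 x 4 x 32 x 32 for an f=8 KL autoencoder
    x_rec = model.decode(z)
```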
CCEdit-main/src/controlnet11/ldm/models/diffusion/__init__.py
ADDED
File without changes
CCEdit-main/src/controlnet11/ldm/models/diffusion/ddim.py
ADDED
@@ -0,0 +1,336 @@
"""SAMPLING ONLY."""

import torch
import numpy as np
from tqdm import tqdm

from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor


class DDIMSampler(object):
    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        if type(attr) == torch.Tensor:
            if attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                    1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,  # this has to come in the same format as the conditioning, e.g. as encoded tokens
               dynamic_threshold=None,
               ucg_schedule=None,
               **kwargs
               ):
        if conditioning is not None:
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list): ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

            elif isinstance(conditioning, list):
                for ctmp in conditioning:
                    if ctmp.shape[0] != batch_size:
                        # fixed: the original printed `cbs`, which is undefined in this branch
                        print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")

            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')

        samples, intermediates = self.ddim_sampling(conditioning, size,
                                                    callback=callback,
                                                    img_callback=img_callback,
                                                    quantize_denoised=quantize_x0,
                                                    mask=mask, x0=x0,
                                                    ddim_use_original_steps=False,
                                                    noise_dropout=noise_dropout,
                                                    temperature=temperature,
                                                    score_corrector=score_corrector,
                                                    corrector_kwargs=corrector_kwargs,
                                                    x_T=x_T,
                                                    log_every_t=log_every_t,
                                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                                    unconditional_conditioning=unconditional_conditioning,
                                                    dynamic_threshold=dynamic_threshold,
                                                    ucg_schedule=ucg_schedule
                                                    )
        return samples, intermediates

    @torch.no_grad()
    def ddim_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
                      ucg_schedule=None):
        device = self.model.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]

        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)

            if mask is not None:
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img

            if ucg_schedule is not None:
                assert len(ucg_schedule) == len(time_range)
                unconditional_guidance_scale = ucg_schedule[i]

            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0 = outs
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)

            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)

        return img, intermediates

    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None,
                      dynamic_threshold=None):
        b, *_, device = *x.shape, x.device

        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
            model_output = self.model.apply_model(x, t, c)
        else:
            x_in = torch.cat([x] * 2)
            t_in = torch.cat([t] * 2)
            if isinstance(c, dict):
                assert isinstance(unconditional_conditioning, dict)
                c_in = dict()
                for k in c:
                    if isinstance(c[k], list):
                        c_in[k] = [torch.cat([
                            unconditional_conditioning[k][i],
                            c[k][i]]) for i in range(len(c[k]))]
                    else:
                        c_in[k] = torch.cat([
                            unconditional_conditioning[k],
                            c[k]])
            elif isinstance(c, list):
                c_in = list()
                assert isinstance(unconditional_conditioning, list)
                for i in range(len(c)):
                    c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))
            else:
                c_in = torch.cat([unconditional_conditioning, c])
            model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)

        if self.model.parameterization == "v":
            e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
        else:
            e_t = model_output

        if score_corrector is not None:
            assert self.model.parameterization == "eps", 'not implemented'
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        # select parameters corresponding to the currently considered timestep
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)

        # current prediction for x_0
        if self.model.parameterization != "v":
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        else:
            pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)

        if quantize_denoised:
            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)

        if dynamic_threshold is not None:
            raise NotImplementedError()

        # direction pointing to x_t
        dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0

    @torch.no_grad()
    def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
               unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
        num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]

        assert t_enc <= num_reference_steps
        num_steps = t_enc

        if use_original_steps:
            alphas_next = self.alphas_cumprod[:num_steps]
            alphas = self.alphas_cumprod_prev[:num_steps]
        else:
            alphas_next = self.ddim_alphas[:num_steps]
            alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])

        x_next = x0
        intermediates = []
        inter_steps = []
        for i in tqdm(range(num_steps), desc='Encoding Image'):
            t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
            if unconditional_guidance_scale == 1.:
                noise_pred = self.model.apply_model(x_next, t, c)
            else:
                assert unconditional_conditioning is not None
                e_t_uncond, noise_pred = torch.chunk(
                    self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
                                           torch.cat((unconditional_conditioning, c))), 2)
                noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)

            xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
            weighted_noise_pred = alphas_next[i].sqrt() * (
                    (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
            x_next = xt_weighted + weighted_noise_pred
            if return_intermediates and i % (
                    num_steps // return_intermediates) == 0 and i < num_steps - 1:
                intermediates.append(x_next)
                inter_steps.append(i)
            elif return_intermediates and i >= num_steps - 2:
                intermediates.append(x_next)
                inter_steps.append(i)
            if callback: callback(i)

        out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
        if return_intermediates:
            out.update({'intermediates': intermediates})
        return x_next, out

    @torch.no_grad()
    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
        # fast, but does not allow for exact reconstruction
        # t serves as an index to gather the correct alphas
        if use_original_steps:
            sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
            sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
        else:
            sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
            sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas

        if noise is None:
            noise = torch.randn_like(x0)
        return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
                extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)

    @torch.no_grad()
    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
               use_original_steps=False, callback=None):

        timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
        timesteps = timesteps[:t_start]

        time_range = np.flip(timesteps)
        total_steps = timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
        x_dec = x_latent
        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
            x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
                                          unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning)
            if callback: callback(i)
        return x_dec
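The `encode`/`decode` pair above enables deterministic DDIM inversion for image editing; a sketch, assuming `model` is a loaded LatentDiffusion, `z0` is the first-stage latent of the source image, and `c`/`uc` are the conditional/unconditional conditionings:

```python
sampler = DDIMSampler(model)
sampler.make_schedule(ddim_num_steps=50, ddim_eta=0.0)  # eta=0 -> deterministic DDIM

t_enc = 40                               # invert 40 of the 50 DDIM steps
z_enc, _ = sampler.encode(z0, c, t_enc)  # noise the latent along the DDIM trajectory
z_out = sampler.decode(z_enc, c, t_enc,
                       unconditional_guidance_scale=7.5,
                       unconditional_conditioning=uc)
```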
CCEdit-main/src/controlnet11/ldm/models/diffusion/ddpm.py
ADDED
@@ -0,0 +1,1797 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""

import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
import itertools
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig

from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler


__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}

def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


def uniform_on_device(r1, r2, shape, device):
    return (r1 - r2) * torch.rand(*shape, device=device) + r2

class DDPM(pl.LightningModule):
    # classic DDPM with Gaussian diffusion, in image space
    def __init__(self,
                 unet_config,
                 timesteps=1000,
                 beta_schedule="linear",
                 loss_type="l2",
                 ckpt_path=None,
                 ignore_keys=[],
                 load_only_unet=False,
                 monitor="val/loss",
                 use_ema=True,
                 first_stage_key="image",
                 image_size=256,
                 channels=3,
                 log_every_t=100,
                 clip_denoised=True,
                 linear_start=1e-4,
                 linear_end=2e-2,
                 cosine_s=8e-3,
                 given_betas=None,
                 original_elbo_weight=0.,
                 v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
                 l_simple_weight=1.,
                 conditioning_key=None,
                 parameterization="eps",  # all assuming fixed variance schedules
                 scheduler_config=None,
                 use_positional_encodings=False,
                 learn_logvar=False,
                 logvar_init=0.,
                 make_it_fit=False,
                 ucg_training=None,
                 reset_ema=False,
                 reset_num_ema_updates=False,
                 ):
        super().__init__()
        assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
        self.parameterization = parameterization
        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size  # try conv?
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.model = DiffusionWrapper(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight

        if monitor is not None:
            self.monitor = monitor
        self.make_it_fit = make_it_fit
        if reset_ema:
            assert exists(ckpt_path)
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
            if reset_ema:
                assert self.use_ema
                print("Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
                self.model_ema = LitEma(self.model)
        if reset_num_ema_updates:
            print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
            assert self.use_ema
            self.model_ema.reset_num_updates()

        self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
                               linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        if self.learn_logvar:
            self.logvar = nn.Parameter(logvar, requires_grad=True)  # wrap the tensor built above (self.logvar does not exist yet here)
        else:
            self.register_buffer('logvar', logvar)

        self.ucg_training = ucg_training or dict()
        if self.ucg_training:
            self.ucg_prng = np.random.RandomState()

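    # register_schedule below precomputes every schedule-dependent quantity as a
    # buffer: betas beta_t, alphas alpha_t = 1 - beta_t, the cumulative products
    # a_bar_t (alphas_cumprod) with their square roots and reciprocals, and the
    # coefficients of the true posterior q(x_{t-1} | x_t, x_0), so training and
    # sampling reduce to per-timestep lookups via extract_into_tensor.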
    def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
                                       cosine_s=cosine_s)
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
                1. - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        if self.parameterization == "eps":
            lvlb_weights = self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
        elif self.parameterization == "x0":
            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
        elif self.parameterization == "v":
            lvlb_weights = torch.ones_like(self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
        else:
            raise NotImplementedError("mu not supported")
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).any()  # no weight may be NaN ('.any()' so a single NaN already fails)

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    @torch.no_grad()
    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        if self.make_it_fit:
            n_params = len([name for name, _ in
                            itertools.chain(self.named_parameters(),
                                            self.named_buffers())])
            for name, param in tqdm(
                    itertools.chain(self.named_parameters(),
                                    self.named_buffers()),
                    desc="Fitting old weights to new weights",
                    total=n_params
            ):
                if name not in sd:
                    continue
                old_shape = sd[name].shape
                new_shape = param.shape
                assert len(old_shape) == len(new_shape)
                if len(new_shape) > 2:
                    # we only modify first two axes
                    assert new_shape[2:] == old_shape[2:]
                # assumes first axis corresponds to output dim
                if not new_shape == old_shape:
                    new_param = param.clone()
                    old_param = sd[name]
                    if len(new_shape) == 1:
                        for i in range(new_param.shape[0]):
                            new_param[i] = old_param[i % old_shape[0]]
                    elif len(new_shape) >= 2:
                        for i in range(new_param.shape[0]):
                            for j in range(new_param.shape[1]):
                                new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]

                        n_used_old = torch.ones(old_shape[1])
                        for j in range(new_param.shape[1]):
                            n_used_old[j % old_shape[1]] += 1
                        n_used_new = torch.zeros(new_shape[1])
                        for j in range(new_param.shape[1]):
                            n_used_new[j] = n_used_old[j % old_shape[1]]

                        n_used_new = n_used_new[None, :]
                        while len(n_used_new.shape) < len(new_shape):
                            n_used_new = n_used_new.unsqueeze(-1)
                        new_param /= n_used_new

                    sd[name] = new_param

        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys:\n {missing}")
        if len(unexpected) > 0:
            print(f"\nUnexpected Keys:\n {unexpected}")

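    # Forward-process helper note: with a_bar_t = prod_{s<=t} (1 - beta_s) the
    # diffusion marginal has the closed form
    #     q(x_t | x_0) = N( sqrt(a_bar_t) * x_0, (1 - a_bar_t) * I ),
    # which q_mean_variance below reads directly off the precomputed buffers.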
    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        return (
                extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )

    def predict_start_from_z_and_v(self, x_t, t, v):
        # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        return (
                extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
        )

    def predict_eps_from_z_and_v(self, x_t, t, v):
        return (
                extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
        )

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
                extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
                extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

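    # The v-parameterization helpers above are mutually consistent: from
    #     x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps
    #     v   = sqrt(a_bar_t) * eps - sqrt(1 - a_bar_t) * x_0      (see get_v below)
    # it follows algebraically that
    #     x_0 = sqrt(a_bar_t) * x_t - sqrt(1 - a_bar_t) * v        (predict_start_from_z_and_v)
    #     eps = sqrt(a_bar_t) * v   + sqrt(1 - a_bar_t) * x_t      (predict_eps_from_z_and_v)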
    def p_mean_variance(self, x, t, clip_denoised: bool):
        model_out = self.model(x, t)
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        if clip_denoised:
            x_recon.clamp_(-1., 1.)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
                                clip_denoised=self.clip_denoised)
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, batch_size=16, return_intermediates=False):
        image_size = self.image_size
        channels = self.channels
        return self.p_sample_loop((batch_size, channels, image_size, image_size),
                                  return_intermediates=return_intermediates)

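    # Each p_sample step above draws x_{t-1} ~ N(mu_theta(x_t, t), sigma_t^2 * I), i.e.
    #     x_{t-1} = mu_theta + sigma_t * z,   z ~ N(0, I),   sigma_t = exp(0.5 * log_var_t),
    # and nonzero_mask zeroes the noise at t == 0 so the final step returns the
    # posterior mean deterministically.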
    def q_sample(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)

    def get_v(self, x, noise, t):
        return (
                extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
        )

    def get_loss(self, pred, target, mean=True):
        if self.loss_type == 'l1':
            loss = (target - pred).abs()
            if mean:
                loss = loss.mean()
        elif self.loss_type == 'l2':
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
        else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")

        return loss

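    # p_losses below regresses the model output onto a target chosen by the
    # parameterization: the injected noise eps ("eps"), the clean input x_0 ("x0"),
    # or v = sqrt(a_bar_t) * eps - sqrt(1 - a_bar_t) * x_0 ("v"); e.g. for "eps" the
    # simple loss is || eps - eps_theta(x_t, t) ||^2 averaged over non-batch dims.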
    def p_losses(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "v":
            target = self.get_v(x_start, noise, t)
        else:
            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict

    def forward(self, x, *args, **kwargs):
        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, 'b h w c -> b c h w')
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        for k in self.ucg_training:
            p = self.ucg_training[k]["p"]
            val = self.ucg_training[k]["val"]
            if val is None:
                val = ""
            for i in range(len(batch[k])):
                if self.ucg_prng.choice(2, p=[1 - p, p]):
                    batch[k][i] = val

        loss, loss_dict = self.shared_step(batch)

        self.log_dict(loss_dict, prog_bar=True,
                      logger=True, on_step=True, on_epoch=True)

        self.log("global_step", self.global_step,
                 prog_bar=True, logger=True, on_step=True, on_epoch=False)

        if self.use_scheduler:
            lr = self.optimizers().param_groups[0]['lr']
            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)

        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self.model)

    def _get_rows_from_list(self, samples):
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = list()
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        return opt


class LatentDiffusion(DDPM):
    """main class"""

    def __init__(self,
                 first_stage_config,
                 cond_stage_config,
                 num_timesteps_cond=None,
                 cond_stage_key="image",
                 cond_stage_trainable=False,
                 concat_mode=True,
                 cond_stage_forward=None,
                 conditioning_key=None,
                 scale_factor=1.0,
                 scale_by_std=False,
                 force_null_conditioning=False,
                 *args, **kwargs):
        self.force_null_conditioning = force_null_conditioning
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = 'concat' if concat_mode else 'crossattn'
        if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:
            conditioning_key = None
        ckpt_path = kwargs.pop("ckpt_path", None)
        reset_ema = kwargs.pop("reset_ema", False)
        reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
        ignore_keys = kwargs.pop("ignore_keys", [])
        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except:
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True
            if reset_ema:
                assert self.use_ema
                print(
                    "Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
                self.model_ema = LitEma(self.model)
        if reset_num_ema_updates:
            print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
            assert self.use_ema
            self.model_ema.reset_num_updates()

    def make_cond_schedule(self):
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids

    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")

    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    def instantiate_cond_stage(self, config):
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model

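    # Std-rescaling note: with scale_by_std=True the very first training batch sets
    # scale_factor = 1 / std(z), so scaled latents z' = scale_factor * z have roughly
    # unit standard deviation; decode_first_stage later undoes this via z = z' / scale_factor.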
+
def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
|
| 644 |
+
denoise_row = []
|
| 645 |
+
for zd in tqdm(samples, desc=desc):
|
| 646 |
+
denoise_row.append(self.decode_first_stage(zd.to(self.device),
|
| 647 |
+
force_not_quantize=force_no_decoder_quantization))
|
| 648 |
+
n_imgs_per_row = len(denoise_row)
|
| 649 |
+
denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
|
| 650 |
+
denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
|
| 651 |
+
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
|
| 652 |
+
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
|
| 653 |
+
return denoise_grid
|
| 654 |
+
|
| 655 |
+
def get_first_stage_encoding(self, encoder_posterior):
|
| 656 |
+
if isinstance(encoder_posterior, DiagonalGaussianDistribution):
|
| 657 |
+
z = encoder_posterior.sample()
|
| 658 |
+
elif isinstance(encoder_posterior, torch.Tensor):
|
| 659 |
+
z = encoder_posterior
|
| 660 |
+
else:
|
| 661 |
+
raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
|
| 662 |
+
return self.scale_factor * z
|
| 663 |
+
|
| 664 |
+
def get_learned_conditioning(self, c):
|
| 665 |
+
if self.cond_stage_forward is None:
|
| 666 |
+
if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
|
| 667 |
+
c = self.cond_stage_model.encode(c)
|
| 668 |
+
if isinstance(c, DiagonalGaussianDistribution):
|
| 669 |
+
c = c.mode()
|
| 670 |
+
else:
|
| 671 |
+
c = self.cond_stage_model(c)
|
| 672 |
+
else:
|
| 673 |
+
assert hasattr(self.cond_stage_model, self.cond_stage_forward)
|
| 674 |
+
c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
|
| 675 |
+
return c
|
| 676 |
+
|
| 677 |
+
def meshgrid(self, h, w):
|
| 678 |
+
y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
|
| 679 |
+
x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
|
| 680 |
+
|
| 681 |
+
arr = torch.cat([y, x], dim=-1)
|
| 682 |
+
return arr
|
| 683 |
+
|
| 684 |
+
def delta_border(self, h, w):
|
| 685 |
+
"""
|
| 686 |
+
:param h: height
|
| 687 |
+
:param w: width
|
| 688 |
+
:return: normalized distance to image border,
|
| 689 |
+
wtith min distance = 0 at border and max dist = 0.5 at image center
|
| 690 |
+
"""
|
| 691 |
+
lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
|
| 692 |
+
arr = self.meshgrid(h, w) / lower_right_corner
|
| 693 |
+
dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
|
| 694 |
+
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
|
| 695 |
+
edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
|
| 696 |
+
return edge_dist
|
| 697 |
+
|
| 698 |
+
def get_weighting(self, h, w, Ly, Lx, device):
|
| 699 |
+
weighting = self.delta_border(h, w)
|
| 700 |
+
weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
|
| 701 |
+
self.split_input_params["clip_max_weight"], )
|
| 702 |
+
weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
|
| 703 |
+
|
| 704 |
+
if self.split_input_params["tie_braker"]:
|
| 705 |
+
L_weighting = self.delta_border(Ly, Lx)
|
| 706 |
+
L_weighting = torch.clip(L_weighting,
|
| 707 |
+
self.split_input_params["clip_min_tie_weight"],
|
| 708 |
+
self.split_input_params["clip_max_tie_weight"])
|
| 709 |
+
|
| 710 |
+
L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
|
| 711 |
+
weighting = weighting * L_weighting
|
| 712 |
+
return weighting
|
| 713 |
+
|
    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
        """
        :param x: img of size (bs, c, h, w)
        :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
        """
        bs, nc, h, w = x.shape

        # number of crops in image
        Ly = (h - kernel_size[0]) // stride[0] + 1
        Lx = (w - kernel_size[1]) // stride[1] + 1

        if uf == 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

        elif uf > 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                                dilation=1, padding=0,
                                stride=(stride[0] * uf, stride[1] * uf))
            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

        elif df > 1 and uf == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                                dilation=1, padding=0,
                                stride=(stride[0] // df, stride[1] // df))
            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting

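    # Tiled-processing note: unfold cuts the input into Ly * Lx overlapping crops and
    # fold re-assembles them by summation. Since overlapping pixels are summed several
    # times, each crop is multiplied by `weighting` (larger away from crop borders) and
    # the stitched result is divided by `normalization = fold(weighting)`, so the
    # effective per-pixel weights sum to one.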
+
@torch.no_grad()
|
| 767 |
+
def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
|
| 768 |
+
cond_key=None, return_original_cond=False, bs=None, return_x=False):
|
| 769 |
+
x = super().get_input(batch, k)
|
| 770 |
+
if bs is not None:
|
| 771 |
+
x = x[:bs]
|
| 772 |
+
x = x.to(self.device)
|
| 773 |
+
encoder_posterior = self.encode_first_stage(x)
|
| 774 |
+
z = self.get_first_stage_encoding(encoder_posterior).detach()
|
| 775 |
+
|
| 776 |
+
if self.model.conditioning_key is not None and not self.force_null_conditioning:
|
| 777 |
+
if cond_key is None:
|
| 778 |
+
cond_key = self.cond_stage_key
|
| 779 |
+
if cond_key != self.first_stage_key:
|
| 780 |
+
if cond_key in ['caption', 'coordinates_bbox', "txt"]:
|
| 781 |
+
xc = batch[cond_key]
|
| 782 |
+
elif cond_key in ['class_label', 'cls']:
|
| 783 |
+
xc = batch
|
| 784 |
+
else:
|
| 785 |
+
xc = super().get_input(batch, cond_key).to(self.device)
|
| 786 |
+
else:
|
| 787 |
+
xc = x
|
| 788 |
+
if not self.cond_stage_trainable or force_c_encode:
|
| 789 |
+
if isinstance(xc, dict) or isinstance(xc, list):
|
| 790 |
+
c = self.get_learned_conditioning(xc)
|
| 791 |
+
else:
|
| 792 |
+
c = self.get_learned_conditioning(xc.to(self.device))
|
| 793 |
+
else:
|
| 794 |
+
c = xc
|
| 795 |
+
if bs is not None:
|
| 796 |
+
c = c[:bs]
|
| 797 |
+
|
| 798 |
+
if self.use_positional_encodings:
|
| 799 |
+
pos_x, pos_y = self.compute_latent_shifts(batch)
|
| 800 |
+
ckey = __conditioning_keys__[self.model.conditioning_key]
|
| 801 |
+
c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
|
| 802 |
+
|
| 803 |
+
else:
|
| 804 |
+
c = None
|
| 805 |
+
xc = None
|
| 806 |
+
if self.use_positional_encodings:
|
| 807 |
+
pos_x, pos_y = self.compute_latent_shifts(batch)
|
| 808 |
+
c = {'pos_x': pos_x, 'pos_y': pos_y}
|
| 809 |
+
out = [z, c]
|
| 810 |
+
if return_first_stage_outputs:
|
| 811 |
+
xrec = self.decode_first_stage(z)
|
| 812 |
+
out.extend([x, xrec])
|
| 813 |
+
if return_x:
|
| 814 |
+
out.extend([x])
|
| 815 |
+
if return_original_cond:
|
| 816 |
+
out.append(xc)
|
| 817 |
+
return out
|
| 818 |
+
|
    @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z
        return self.first_stage_model.decode(z)

    @torch.no_grad()
    def encode_first_stage(self, x):
        return self.first_stage_model.encode(x)

    def shared_step(self, batch, **kwargs):
        x, c = self.get_input(batch, self.first_stage_key)
        loss = self(x, c)
        return loss

    def forward(self, x, c, *args, **kwargs):
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        if self.model.conditioning_key is not None:
            assert c is not None
            if self.cond_stage_trainable:
                c = self.get_learned_conditioning(c)
            if self.shorten_cond_schedule:  # TODO: drop this option
                tc = self.cond_ids[t].to(self.device)
                c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
        return self.p_losses(x, c, t, *args, **kwargs)

    def apply_model(self, x_noisy, t, cond, return_ids=False):
        if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
            pass
        else:
            if not isinstance(cond, list):
                cond = [cond]
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        x_recon = self.model(x_noisy, t, **cond)

        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
               extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)

    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return mean_flat(kl_prior) / np.log(2.0)

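    # apply_model above normalizes the conditioning into the dict layout the wrapped
    # diffusion model expects. A rough sketch (the tensor names are placeholders, not
    # part of this module's API):
    #     out = self.apply_model(x_noisy, t, {'c_crossattn': [text_emb]})  # dict passed through as-is
    #     out = self.apply_model(x_noisy, t, cond_tensor)  # wrapped as {'c_concat' or 'c_crossattn': [cond_tensor]}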
+
def p_losses(self, x_start, cond, t, noise=None):
|
| 886 |
+
noise = default(noise, lambda: torch.randn_like(x_start))
|
| 887 |
+
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
|
| 888 |
+
model_output = self.apply_model(x_noisy, t, cond)
|
| 889 |
+
|
| 890 |
+
loss_dict = {}
|
| 891 |
+
prefix = 'train' if self.training else 'val'
|
| 892 |
+
|
| 893 |
+
if self.parameterization == "x0":
|
| 894 |
+
target = x_start
|
| 895 |
+
elif self.parameterization == "eps":
|
| 896 |
+
target = noise
|
| 897 |
+
elif self.parameterization == "v":
|
| 898 |
+
target = self.get_v(x_start, noise, t)
|
| 899 |
+
else:
|
| 900 |
+
raise NotImplementedError()
|
| 901 |
+
|
| 902 |
+
loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
|
| 903 |
+
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
|
| 904 |
+
|
| 905 |
+
logvar_t = self.logvar[t].to(self.device)
|
| 906 |
+
loss = loss_simple / torch.exp(logvar_t) + logvar_t
|
| 907 |
+
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
|
| 908 |
+
if self.learn_logvar:
|
| 909 |
+
loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
|
| 910 |
+
loss_dict.update({'logvar': self.logvar.data.mean()})
|
| 911 |
+
|
| 912 |
+
loss = self.l_simple_weight * loss.mean()
|
| 913 |
+
|
| 914 |
+
loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
|
| 915 |
+
loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
|
| 916 |
+
loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
|
| 917 |
+
loss += (self.original_elbo_weight * loss_vlb)
|
| 918 |
+
loss_dict.update({f'{prefix}/loss': loss})
|
| 919 |
+
|
| 920 |
+
return loss, loss_dict
|
| 921 |
+
|
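    # With learn_logvar=True, the weighting above is the heteroscedastic objective
    #     loss = L_simple / exp(logvar_t) + logvar_t,
    # which for fixed L_simple is minimized at logvar_t = log(L_simple), so the
    # learned logvar tracks the typical loss magnitude at each timestep.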
    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                        return_x0=False, score_corrector=None, corrector_kwargs=None):
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if return_codebook_ids:
            model_out, logits = model_out

        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        else:
            raise NotImplementedError()

        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                              log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        timesteps = self.num_timesteps
        if batch_size is not None:
            b = batch_size
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(
            range(0, timesteps))
        if type(temperature) == float:
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback: callback(i)
            if img_callback: img_callback(img, i)
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):

        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback: callback(i)
            if img_callback: img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond,
                                  shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        if ddim:
            ddim_sampler = DDIMSampler(self)
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)

        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates

| 1123 |
+
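    # A minimal usage sketch (hypothetical, not part of the original file): given a
    # trained LatentDiffusion instance `model` and conditioning `c` produced by
    # get_learned_conditioning, sample_log dispatches between DDIM sampling (fast,
    # fixed step count) and the full ancestral p_sample_loop:
    #
    #     samples, intermediates = model.sample_log(cond=c, batch_size=4,
    #                                               ddim=True, ddim_steps=50, eta=0.)
    #     images = model.decode_first_stage(samples)   # latents -> pixel space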

    @torch.no_grad()
    def get_unconditional_conditioning(self, batch_size, null_label=None):
        if null_label is not None:
            xc = null_label
            if isinstance(xc, ListConfig):
                xc = list(xc)
            if isinstance(xc, dict) or isinstance(xc, list):
                c = self.get_learned_conditioning(xc)
            else:
                if hasattr(xc, "to"):
                    xc = xc.to(self.device)
                c = self.get_learned_conditioning(xc)
        else:
            if self.cond_stage_key in ["class_label", "cls"]:
                xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
                return self.get_learned_conditioning(xc)
            else:
                raise NotImplementedError("todo")
        if isinstance(c, list):  # in case the encoder gives us a list
            for i in range(len(c)):
                c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
        else:
            c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
        return c
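    # The unconditional embedding produced above feeds classifier-free guidance
    # during sampling. As a reminder of the standard combination rule (general CFG,
    # not specific to this file), with guidance scale s and noise predictions
    # eps_c (conditional) and eps_u (unconditional):
    #
    #     eps_guided = eps_u + s * (eps_c - eps_u)
    #
    # so s = 1 recovers the purely conditional prediction and larger s pushes
    # samples harder toward the conditioning.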

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
                   use_ema_scope=True,
                   **kwargs):
        ema_scope = self.ema_scope if use_ema_scope else nullcontext
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption", "txt"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
                log["conditioning"] = xc
            elif self.cond_stage_key in ['class_label', "cls"]:
                try:
                    xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
                    log['conditioning'] = xc
                except KeyError:
                    # probably no "human_label" in batch
                    pass
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with ema_scope("Sampling"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
                    self.first_stage_model, IdentityFirstStage):
                # also display when quantizing x0 while sampling
                with ema_scope("Plotting Quantized Denoised"):
                    samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                             ddim_steps=ddim_steps, eta=ddim_eta,
                                                             quantize_denoised=True)
                    # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
                    #                                      quantize_denoised=True)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_x0_quantized"] = x_samples

        if unconditional_guidance_scale > 1.0:
            uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
            if self.model.conditioning_key == "crossattn-adm":
                uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
            with ema_scope("Sampling with classifier-free guidance"):
                samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                 ddim_steps=ddim_steps, eta=ddim_eta,
                                                 unconditional_guidance_scale=unconditional_guidance_scale,
                                                 unconditional_conditioning=uc,
                                                 )
                x_samples_cfg = self.decode_first_stage(samples_cfg)
                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg

        if inpaint:
            # make a simple center square
            b, h, w = z.shape[0], z.shape[2], z.shape[3]
            mask = torch.ones(N, h, w).to(self.device)
            # zeros will be filled in
            mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
            mask = mask[:, None, ...]
            with ema_scope("Plotting Inpaint"):
                samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                             ddim_steps=ddim_steps, x0=z[:N], mask=mask)
            x_samples = self.decode_first_stage(samples.to(self.device))
            log["samples_inpainting"] = x_samples
            log["mask"] = mask

            # outpaint
            mask = 1. - mask
            with ema_scope("Plotting Outpaint"):
                samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                             ddim_steps=ddim_steps, x0=z[:N], mask=mask)
            x_samples = self.decode_first_stage(samples.to(self.device))
            log["samples_outpainting"] = x_samples

        if plot_progressive_rows:
            with ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.cond_stage_trainable:
            print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
            params = params + list(self.cond_stage_model.parameters())
        if self.learn_logvar:
            print('Diffusion model optimizing logvar')
            params.append(self.logvar)
        opt = torch.optim.AdamW(params, lr=lr)
        if self.use_scheduler:
            assert 'target' in self.scheduler_config
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                }]
            return [opt], scheduler
        return opt

    @torch.no_grad()
    def to_rgb(self, x):
        x = x.float()
        if not hasattr(self, "colorize"):
            self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
        x = nn.functional.conv2d(x, weight=self.colorize)
        x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
        return x


class DiffusionWrapper(pl.LightningModule):
    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False)
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None):
        if self.conditioning_key is None:
            out = self.diffusion_model(x, t)
        elif self.conditioning_key == 'concat':
            xc = torch.cat([x] + c_concat, dim=1)
            out = self.diffusion_model(xc, t)
        elif self.conditioning_key == 'crossattn':
            if not self.sequential_cross_attn:
                cc = torch.cat(c_crossattn, 1)
            else:
                cc = c_crossattn
            out = self.diffusion_model(x, t, context=cc)
        elif self.conditioning_key == 'hybrid':
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc)
        elif self.conditioning_key == 'hybrid-adm':
            assert c_adm is not None
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc, y=c_adm)
        elif self.conditioning_key == 'crossattn-adm':
            assert c_adm is not None
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(x, t, context=cc, y=c_adm)
        elif self.conditioning_key == 'adm':
            cc = c_crossattn[0]
            out = self.diffusion_model(x, t, y=cc)
        else:
            raise NotImplementedError()

        return out


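# Routing summary for DiffusionWrapper.forward (derived from the branches above;
# shapes below are illustrative assumptions, not from the original file):
# 'concat' stacks conditioning onto the latent along channels, 'crossattn' feeds
# it as attention context, 'adm' passes it as a class-embedding-style vector y,
# and the hybrid variants combine these. A minimal sketch:
#
#     wrapper = DiffusionWrapper(diff_model_config, conditioning_key='hybrid')
#     # x: (B, 4, H, W) latent, t: (B,) timesteps
#     # c_concat: list of (B, C_cat, H, W); c_crossattn: list of (B, L, D)
#     out = wrapper(x, t, c_concat=[cat_cond], c_crossattn=[txt_emb])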
class LatentUpscaleDiffusion(LatentDiffusion):
    def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs):
        super().__init__(*args, **kwargs)
        # assumes that neither the cond_stage nor the low_scale_model contain trainable params
        assert not self.cond_stage_trainable
        self.instantiate_low_stage(low_scale_config)
        self.low_scale_key = low_scale_key
        self.noise_level_key = noise_level_key

    def instantiate_low_stage(self, config):
        model = instantiate_from_config(config)
        self.low_scale_model = model.eval()
        self.low_scale_model.train = disabled_train
        for param in self.low_scale_model.parameters():
            param.requires_grad = False

    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
        if not log_mode:
            z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
        else:
            z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                                  force_c_encode=True, return_original_cond=True, bs=bs)
        x_low = batch[self.low_scale_key][:bs]
        x_low = rearrange(x_low, 'b h w c -> b c h w')
        x_low = x_low.to(memory_format=torch.contiguous_format).float()
        zx, noise_level = self.low_scale_model(x_low)
        if self.noise_level_key is not None:
            # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
            raise NotImplementedError('TODO')

        all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
        if log_mode:
            # TODO: maybe disable if too expensive
            x_low_rec = self.low_scale_model.decode(zx)
            return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
        return z, all_conds

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True,
                   unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True,
                   **kwargs):
        ema_scope = self.ema_scope if use_ema_scope else nullcontext
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N,
                                                                          log_mode=True)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        log["x_lr"] = x_low
        log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption", "txt"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
                log["conditioning"] = xc
            elif self.cond_stage_key in ['class_label', 'cls']:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with ema_scope("Sampling"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

        if unconditional_guidance_scale > 1.0:
            uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label)
            # TODO explore better "unconditional" choices for the other keys
            # maybe guide away from empty text label and highest noise level and maximally degraded zx?
            uc = dict()
            for k in c:
                if k == "c_crossattn":
                    assert isinstance(c[k], list) and len(c[k]) == 1
                    uc[k] = [uc_tmp]
                elif k == "c_adm":  # todo: only run with text-based guidance?
                    assert isinstance(c[k], torch.Tensor)
                    # uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
                    uc[k] = c[k]
                elif isinstance(c[k], list):
                    uc[k] = [c[k][i] for i in range(len(c[k]))]
                else:
                    uc[k] = c[k]

            with ema_scope("Sampling with classifier-free guidance"):
                samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                 ddim_steps=ddim_steps, eta=ddim_eta,
                                                 unconditional_guidance_scale=unconditional_guidance_scale,
                                                 unconditional_conditioning=uc,
                                                 )
                x_samples_cfg = self.decode_first_stage(samples_cfg)
                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg

        if plot_progressive_rows:
            with ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        return log


class LatentFinetuneDiffusion(LatentDiffusion):
    """
    Basis for different finetunes, such as inpainting or depth2image
    To disable finetuning mode, set finetune_keys to None
    """

    def __init__(self,
                 concat_keys: tuple,
                 finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
                                "model_ema.diffusion_modelinput_blocks00weight"
                                ),
                 keep_finetune_dims=4,
                 # if model was trained without concat mode before and we would like to keep these channels
                 c_concat_log_start=None,  # to log reconstruction of c_concat codes
                 c_concat_log_end=None,
                 *args, **kwargs
                 ):
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", list())
        super().__init__(*args, **kwargs)
        self.finetune_keys = finetune_keys
        self.concat_keys = concat_keys
        self.keep_dims = keep_finetune_dims
        self.c_concat_log_start = c_concat_log_start
        self.c_concat_log_end = c_concat_log_end
        if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
        if exists(ckpt_path):
            self.init_from_ckpt(ckpt_path, ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]

            # make it explicit, finetune by including extra input channels
            if exists(self.finetune_keys) and k in self.finetune_keys:
                new_entry = None
                for name, param in self.named_parameters():
                    if name in self.finetune_keys:
                        print(
                            f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
                        new_entry = torch.zeros_like(param)  # zero init
                assert exists(new_entry), 'did not find matching parameter to modify'
                new_entry[:, :self.keep_dims, ...] = sd[k]
                sd[k] = new_entry

        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")
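    # Worked example of the zero-init expansion above (values are hypothetical,
    # chosen for illustration): a checkpoint trained without concat conditioning
    # might have an input conv weight of shape (320, 4, 3, 3), while the finetuned
    # model expects (320, 9, 3, 3) because extra conditioning latents are
    # concatenated onto the input. The loop copies the original 4 input channels
    # and leaves the new ones at zero, so at step 0 the finetuned model
    # reproduces the pretrained behaviour:
    #
    #     new_entry = torch.zeros(320, 9, 3, 3)
    #     new_entry[:, :4, ...] = old_weight          # keep_finetune_dims = 4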

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
                   use_ema_scope=True,
                   **kwargs):
        ema_scope = self.ema_scope if use_ema_scope else nullcontext
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
        c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption", "txt"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
                log["conditioning"] = xc
            elif self.cond_stage_key in ['class_label', 'cls']:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
            log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with ema_scope("Sampling"):
                samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
                                                         batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

        if unconditional_guidance_scale > 1.0:
            uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
            uc_cat = c_cat
            uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
            with ema_scope("Sampling with classifier-free guidance"):
                samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
                                                 batch_size=N, ddim=use_ddim,
                                                 ddim_steps=ddim_steps, eta=ddim_eta,
                                                 unconditional_guidance_scale=unconditional_guidance_scale,
                                                 unconditional_conditioning=uc_full,
                                                 )
                x_samples_cfg = self.decode_first_stage(samples_cfg)
                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg

        return log


class LatentInpaintDiffusion(LatentFinetuneDiffusion):
    """
    can either run as pure inpainting model (only concat mode) or with mixed conditionings,
    e.g. mask as concat and text via cross-attn.
    To disable finetuning mode, set finetune_keys to None
    """

    def __init__(self,
                 concat_keys=("mask", "masked_image"),
                 masked_image_key="masked_image",
                 *args, **kwargs
                 ):
        super().__init__(concat_keys, *args, **kwargs)
        self.masked_image_key = masked_image_key
        assert self.masked_image_key in concat_keys

    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
        # note: restricted to non-trainable encoders currently
        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                              force_c_encode=True, return_original_cond=True, bs=bs)

        assert exists(self.concat_keys)
        c_cat = list()
        for ck in self.concat_keys:
            cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
            if bs is not None:
                cc = cc[:bs]
                cc = cc.to(self.device)
            bchw = z.shape
            if ck != self.masked_image_key:
                cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
            else:
                cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
            c_cat.append(cc)
        c_cat = torch.cat(c_cat, dim=1)
        all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
        if return_first_stage_outputs:
            return z, all_conds, x, xrec, xc
        return z, all_conds

    @torch.no_grad()
    def log_images(self, *args, **kwargs):
        log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
        log["masked_image"] = rearrange(args[0]["masked_image"],
                                        'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
        return log


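# A minimal sketch of the batch this model's get_input expects. Key names follow
# concat_keys above; shapes, value ranges, and the mask convention (1 = hole to
# repaint, masked_image = input with the hole blanked) are assumptions for
# illustration, not guaranteed by this file:
#
#     image = torch.rand(1, 512, 512, 3) * 2. - 1.   # channels-last, in [-1, 1]
#     mask = torch.zeros(1, 512, 512, 1)
#     mask[:, 128:384, 128:384] = 1.
#     batch = {"image": image,
#              "mask": mask,
#              "masked_image": image * (1. - mask),
#              "txt": ["a photo of a cat"]}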
class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion):
    """
    condition on monocular depth estimation
    """

    def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs):
        super().__init__(concat_keys=concat_keys, *args, **kwargs)
        self.depth_model = instantiate_from_config(depth_stage_config)
        self.depth_stage_key = concat_keys[0]

    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
        # note: restricted to non-trainable encoders currently
        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img'
        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                              force_c_encode=True, return_original_cond=True, bs=bs)

        assert exists(self.concat_keys)
        assert len(self.concat_keys) == 1
        c_cat = list()
        for ck in self.concat_keys:
            cc = batch[ck]
            if bs is not None:
                cc = cc[:bs]
                cc = cc.to(self.device)
            cc = self.depth_model(cc)
            cc = torch.nn.functional.interpolate(
                cc,
                size=z.shape[2:],
                mode="bicubic",
                align_corners=False,
            )

            depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
                                                                                           keepdim=True)
            cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.
            c_cat.append(cc)
        c_cat = torch.cat(c_cat, dim=1)
        all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
        if return_first_stage_outputs:
            return z, all_conds, x, xrec, xc
        return z, all_conds

    @torch.no_grad()
    def log_images(self, *args, **kwargs):
        log = super().log_images(*args, **kwargs)
        depth = self.depth_model(args[0][self.depth_stage_key])
        depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \
            torch.amax(depth, dim=[1, 2, 3], keepdim=True)
        log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1.
        return log


class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
    """
    condition on low-res image (and optionally on some spatial noise augmentation)
    """

    def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None,
                 low_scale_config=None, low_scale_key=None, *args, **kwargs):
        super().__init__(concat_keys=concat_keys, *args, **kwargs)
        self.reshuffle_patch_size = reshuffle_patch_size
        self.low_scale_model = None
        if low_scale_config is not None:
            print("Initializing a low-scale model")
            assert exists(low_scale_key)
            self.instantiate_low_stage(low_scale_config)
            self.low_scale_key = low_scale_key

    def instantiate_low_stage(self, config):
        model = instantiate_from_config(config)
        self.low_scale_model = model.eval()
        self.low_scale_model.train = disabled_train
        for param in self.low_scale_model.parameters():
            param.requires_grad = False

    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
        # note: restricted to non-trainable encoders currently
        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft'
        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                              force_c_encode=True, return_original_cond=True, bs=bs)

        assert exists(self.concat_keys)
        assert len(self.concat_keys) == 1
        # optionally make spatial noise_level here
        c_cat = list()
        noise_level = None
        for ck in self.concat_keys:
            cc = batch[ck]
            cc = rearrange(cc, 'b h w c -> b c h w')
            if exists(self.reshuffle_patch_size):
                assert isinstance(self.reshuffle_patch_size, int)
                cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
                               p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size)
            if bs is not None:
                cc = cc[:bs]
                cc = cc.to(self.device)
            if exists(self.low_scale_model) and ck == self.low_scale_key:
                cc, noise_level = self.low_scale_model(cc)
            c_cat.append(cc)
        c_cat = torch.cat(c_cat, dim=1)
        if exists(noise_level):
            all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level}
        else:
            all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
        if return_first_stage_outputs:
            return z, all_conds, x, xrec, xc
        return z, all_conds

    @torch.no_grad()
    def log_images(self, *args, **kwargs):
        log = super().log_images(*args, **kwargs)
        log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
        return log
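# Shape walk-through of the patch reshuffle in get_input above (numbers are
# illustrative): with reshuffle_patch_size = 2, a low-res conditioning image of
# shape (B, 3, 128, 128) becomes (B, 2*2*3, 64, 64) = (B, 12, 64, 64). Spatial
# resolution is traded for channels so the conditioning matches a smaller
# latent grid:
#
#     from einops import rearrange
#     cc = torch.randn(1, 3, 128, 128)
#     cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w', p1=2, p2=2)
#     assert cc.shape == (1, 12, 64, 64)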

CCEdit-main/src/controlnet11/ldm/models/diffusion/dpm_solver/__init__.py
ADDED
@@ -0,0 +1 @@
from .sampler import DPMSolverSampler
CCEdit-main/src/controlnet11/ldm/models/diffusion/dpm_solver/dpm_solver.py
ADDED
@@ -0,0 +1,1154 @@
import torch
import torch.nn.functional as F
import math
from tqdm import tqdm


class NoiseScheduleVP:
    def __init__(
            self,
            schedule='discrete',
            betas=None,
            alphas_cumprod=None,
            continuous_beta_0=0.1,
            continuous_beta_1=20.,
    ):
        """Create a wrapper class for the forward SDE (VP type).
        ***
        Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
        We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
        ***
        The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
        We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
        Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
            log_alpha_t = self.marginal_log_mean_coeff(t)
            sigma_t = self.marginal_std(t)
            lambda_t = self.marginal_lambda(t)
        Moreover, as lambda(t) is an invertible function, we also support its inverse function:
            t = self.inverse_lambda(lambda_t)
        ===============================================================
        We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
        1. For discrete-time DPMs:
            For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
                t_i = (i + 1) / N
            e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
            We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
            Args:
                betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
                alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
            Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
            **Important**: Please pay special attention for the args for `alphas_cumprod`:
                The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
                    q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
                Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
                    alpha_{t_n} = \sqrt{\hat{alpha_n}},
                and
                    log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
        2. For continuous-time DPMs:
            We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
            schedule are the default settings in DDPM and improved-DDPM:
            Args:
                beta_min: A `float` number. The smallest beta for the linear schedule.
                beta_max: A `float` number. The largest beta for the linear schedule.
                cosine_s: A `float` number. The hyperparameter in the cosine schedule.
                cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
                T: A `float` number. The ending time of the forward process.
        ===============================================================
        Args:
            schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
                    'linear' or 'cosine' for continuous-time DPMs.
        Returns:
            A wrapper object of the forward SDE (VP type).

        ===============================================================
        Example:
        # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
        >>> ns = NoiseScheduleVP('discrete', betas=betas)
        # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
        >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
        # For continuous-time DPMs (VPSDE), linear schedule:
        >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
        """

        if schedule not in ['discrete', 'linear', 'cosine']:
            raise ValueError(
                "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
                    schedule))

        self.schedule = schedule
        if schedule == 'discrete':
            if betas is not None:
                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
            else:
                assert alphas_cumprod is not None
                log_alphas = 0.5 * torch.log(alphas_cumprod)
            self.total_N = len(log_alphas)
            self.T = 1.
            self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
            self.log_alpha_array = log_alphas.reshape((1, -1,))
        else:
            self.total_N = 1000
            self.beta_0 = continuous_beta_0
            self.beta_1 = continuous_beta_1
            self.cosine_s = 0.008
            self.cosine_beta_max = 999.
            self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
                    1. + self.cosine_s) / math.pi - self.cosine_s
            self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
            self.schedule = schedule
            if schedule == 'cosine':
                # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
                # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
                self.T = 0.9946
            else:
                self.T = 1.

    def marginal_log_mean_coeff(self, t):
        """
        Compute log(alpha_t) of a given continuous-time label t in [0, T].
        """
        if self.schedule == 'discrete':
            return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
                                  self.log_alpha_array.to(t.device)).reshape((-1))
        elif self.schedule == 'linear':
            return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
        elif self.schedule == 'cosine':
            log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
            log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
            return log_alpha_t

    def marginal_alpha(self, t):
        """
        Compute alpha_t of a given continuous-time label t in [0, T].
        """
        return torch.exp(self.marginal_log_mean_coeff(t))

    def marginal_std(self, t):
        """
        Compute sigma_t of a given continuous-time label t in [0, T].
        """
        return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))

    def marginal_lambda(self, t):
        """
        Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
        """
        log_mean_coeff = self.marginal_log_mean_coeff(t)
        log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
        return log_mean_coeff - log_std

    def inverse_lambda(self, lamb):
        """
        Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
        """
        if self.schedule == 'linear':
            tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
            Delta = self.beta_0 ** 2 + tmp
            return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
        elif self.schedule == 'discrete':
            log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
            t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
                               torch.flip(self.t_array.to(lamb.device), [1]))
            return t.reshape((-1,))
        else:
            log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
            t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
                    1. + self.cosine_s) / math.pi - self.cosine_s
            t = t_fn(log_alpha)
            return t


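# A quick sanity-check sketch of the schedule API above (hypothetical values,
# matching the docstring's discrete example): marginal_lambda and inverse_lambda
# should round-trip up to interpolation error, and the VP identity
# alpha_t^2 + sigma_t^2 = 1 holds by construction:
#
#     betas = torch.linspace(1e-4, 2e-2, 1000)
#     ns = NoiseScheduleVP('discrete', betas=betas)
#     t = torch.tensor([0.5])
#     t_back = ns.inverse_lambda(ns.marginal_lambda(t))   # approximately t
#     a, s = ns.marginal_alpha(t), ns.marginal_std(t)
#     assert torch.allclose(a ** 2 + s ** 2, torch.ones_like(t), atol=1e-4)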
def model_wrapper(
|
| 162 |
+
model,
|
| 163 |
+
noise_schedule,
|
| 164 |
+
model_type="noise",
|
| 165 |
+
model_kwargs={},
|
| 166 |
+
guidance_type="uncond",
|
| 167 |
+
condition=None,
|
| 168 |
+
unconditional_condition=None,
|
| 169 |
+
guidance_scale=1.,
|
| 170 |
+
classifier_fn=None,
|
| 171 |
+
classifier_kwargs={},
|
| 172 |
+
):
|
| 173 |
+
"""Create a wrapper function for the noise prediction model.
|
| 174 |
+
DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
|
| 175 |
+
firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
|
| 176 |
+
We support four types of the diffusion model by setting `model_type`:
|
| 177 |
+
1. "noise": noise prediction model. (Trained by predicting noise).
|
| 178 |
+
2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
|
| 179 |
+
3. "v": velocity prediction model. (Trained by predicting the velocity).
|
| 180 |
+
The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
|
| 181 |
+
[1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
|
| 182 |
+
arXiv preprint arXiv:2202.00512 (2022).
|
| 183 |
+
[2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
|
| 184 |
+
arXiv preprint arXiv:2210.02303 (2022).
|
| 185 |
+
|
| 186 |
+
4. "score": marginal score function. (Trained by denoising score matching).
|
| 187 |
+
Note that the score function and the noise prediction model follows a simple relationship:
|
| 188 |
+
```
|
| 189 |
+
noise(x_t, t) = -sigma_t * score(x_t, t)
|
| 190 |
+
```
|
| 191 |
+
We support three types of guided sampling by DPMs by setting `guidance_type`:
|
| 192 |
+
1. "uncond": unconditional sampling by DPMs.
|
| 193 |
+
The input `model` has the following format:
|
            ``
                model(x, t_input, **model_kwargs) -> noise | x_start | v | score
            ``
        2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
            The input `model` has the following format:
            ``
                model(x, t_input, **model_kwargs) -> noise | x_start | v | score
            ``
            The input `classifier_fn` has the following format:
            ``
                classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
            ``
            [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
                in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
        3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
            The input `model` has the following format:
            ``
                model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
            ``
            And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
            [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
                arXiv preprint arXiv:2207.12598 (2022).

    The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
    or continuous-time labels (i.e. epsilon to T).
    We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise:
    ``
        def model_fn(x, t_continuous) -> noise:
            t_input = get_model_input_time(t_continuous)
            return noise_pred(model, x, t_input, **model_kwargs)
    ``
    where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
    ===============================================================
    Args:
        model: A diffusion model with the corresponding format described above.
        noise_schedule: A noise schedule object, such as NoiseScheduleVP.
        model_type: A `str`. The parameterization type of the diffusion model.
                    "noise" or "x_start" or "v" or "score".
        model_kwargs: A `dict`. A dict for the other inputs of the model function.
        guidance_type: A `str`. The type of the guidance for sampling.
                    "uncond" or "classifier" or "classifier-free".
        condition: A pytorch tensor. The condition for the guided sampling.
                    Only used for "classifier" or "classifier-free" guidance type.
        unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
                    Only used for "classifier-free" guidance type.
        guidance_scale: A `float`. The scale for the guided sampling.
        classifier_fn: A classifier function. Only used for the classifier guidance.
        classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
    Returns:
        A noise prediction model that accepts the noised data and the continuous time as the inputs.
    """

    def get_model_input_time(t_continuous):
        """
        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
        For continuous-time DPMs, we just use `t_continuous`.
        """
        if noise_schedule.schedule == 'discrete':
            return (t_continuous - 1. / noise_schedule.total_N) * 1000.
        else:
            return t_continuous
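    # Illustrative check for `get_model_input_time` (assuming a discrete schedule with total_N == 1000):
    # t_continuous == 1.0 maps to t_input == 999.0, and t_continuous == 1 / 1000 maps to t_input == 0.0.
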
    def noise_pred_fn(x, t_continuous, cond=None):
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        t_input = get_model_input_time(t_continuous)
        if cond is None:
            output = model(x, t_input, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        if model_type == "noise":
            return output
        elif model_type == "x_start":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
        elif model_type == "v":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
        elif model_type == "score":
            sigma_t = noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return -expand_dims(sigma_t, dims) * output
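    # The branches of `noise_pred_fn` above follow the standard identities for x_t = alpha_t * x0 + sigma_t * noise:
    #   "x_start": noise = (x_t - alpha_t * x0) / sigma_t
    #   "v":       noise = alpha_t * v + sigma_t * x_t
    #   "score":   noise = -sigma_t * score
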
    def cond_grad_fn(x, t_input):
        """
        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
        """
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]
    def model_fn(x, t_continuous):
        """
        The noise prediction model function that is used for DPM-Solver.
        """
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
        elif guidance_type == "classifier-free":
            if guidance_scale == 1. or unconditional_condition is None:
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t_continuous] * 2)
                c_in = torch.cat([unconditional_condition, condition])
                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return noise_uncond + guidance_scale * (noise - noise_uncond)

    assert model_type in ["noise", "x_start", "v", "score"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]
    return model_fn

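# A minimal wiring sketch for `model_wrapper` (illustrative only; `model`, `cond` and `uncond`
# are placeholders for a user's diffusion model and condition tensors):
#   model_fn = model_wrapper(model, noise_schedule, model_type="noise",
#                            guidance_type="classifier-free",
#                            condition=cond, unconditional_condition=uncond,
#                            guidance_scale=7.5)
#   noise = model_fn(x_t, t_continuous)  # predicted noise, ready to be used by DPM_Solver
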
class DPM_Solver:
    def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
        """Construct a DPM-Solver.
        We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
        If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
        If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
            In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
            The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
        Args:
            model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
                ``
                def model_fn(x, t_continuous):
                    return noise
                ``
            noise_schedule: A noise schedule object, such as NoiseScheduleVP.
            predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
            thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
            max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.

        [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
        """
        self.model = model_fn
        self.noise_schedule = noise_schedule
        self.predict_x0 = predict_x0
        self.thresholding = thresholding
        self.max_val = max_val
    def noise_prediction_fn(self, x, t):
        """
        Return the noise prediction model.
        """
        return self.model(x, t)

    def data_prediction_fn(self, x, t):
        """
        Return the data prediction model (with thresholding).
        """
        noise = self.noise_prediction_fn(x, t)
        dims = x.dim()
        alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
        x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
        if self.thresholding:
            p = 0.995  # A hyperparameter in the paper of "Imagen" [1].
            s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
            s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
            x0 = torch.clamp(x0, -s, s) / s
        return x0
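    # Note on the dynamic thresholding in `data_prediction_fn` above: per sample, s is the
    # p-quantile (p = 0.995) of |x0|, floored at `max_val`; x0 is then clamped to [-s, s]
    # and rescaled by 1 / s, so the returned prediction always lies in [-1, 1].
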
    def model_fn(self, x, t):
        """
        Convert the model to the noise prediction model or the data prediction model.
        """
        if self.predict_x0:
            return self.data_prediction_fn(x, t)
        else:
            return self.noise_prediction_fn(x, t)
    def get_time_steps(self, skip_type, t_T, t_0, N, device):
        """Compute the intermediate time steps for sampling.
        Args:
            skip_type: A `str`. The type for the spacing of the time steps. We support three types:
                - 'logSNR': uniform logSNR for the time steps.
                - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
                - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            N: An `int`. The total number of the spacing of the time steps.
            device: A torch device.
        Returns:
            A pytorch tensor of the time steps, with the shape (N + 1,).
        """
        if skip_type == 'logSNR':
            lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
            lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
            logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
            return self.noise_schedule.inverse_lambda(logSNR_steps)
        elif skip_type == 'time_uniform':
            return torch.linspace(t_T, t_0, N + 1).to(device)
        elif skip_type == 'time_quadratic':
            t_order = 2
            t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
            return t
        else:
            raise ValueError(
                "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
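    # Example for `get_time_steps` (values rounded): with skip_type='time_uniform',
    # t_T=1., t_0=1e-3 and N=4, the returned steps are approximately
    # tensor([1.0000, 0.7503, 0.5005, 0.2508, 0.0010]).
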
    def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
        """
        Get the order of each step for sampling by the singlestep DPM-Solver.
        We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
        Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
            - If order == 1:
                We take `steps` of DPM-Solver-1 (i.e. DDIM).
            - If order == 2:
                - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
                - If steps % 2 == 0, we use K steps of DPM-Solver-2.
                - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
            - If order == 3:
                - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
                - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
                - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
                - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
        ============================================
        Args:
            order: An `int`. The max order for the solver (2 or 3).
            steps: An `int`. The total number of function evaluations (NFE).
            skip_type: A `str`. The type for the spacing of the time steps. We support three types:
                - 'logSNR': uniform logSNR for the time steps.
                - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
                - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            device: A torch device.
        Returns:
            orders: A list of the solver order of each step.
        """
        if order == 3:
            K = steps // 3 + 1
            if steps % 3 == 0:
                orders = [3, ] * (K - 2) + [2, 1]
            elif steps % 3 == 1:
                orders = [3, ] * (K - 1) + [1]
            else:
                orders = [3, ] * (K - 1) + [2]
        elif order == 2:
            if steps % 2 == 0:
                K = steps // 2
                orders = [2, ] * K
            else:
                K = steps // 2 + 1
                orders = [2, ] * (K - 1) + [1]
        elif order == 1:
            K = steps
            orders = [1, ] * steps
        else:
            raise ValueError("'order' must be '1' or '2' or '3'.")
        if skip_type == 'logSNR':
            # To reproduce the results in DPM-Solver paper
            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
        else:
            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
                torch.cumsum(torch.tensor([0, ] + orders), 0).to(device)]
        return timesteps_outer, orders
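    # Example for the order decomposition above: steps=6 with order=3 gives K=3 and
    # orders == [3, 2, 1], i.e. one DPM-Solver-3 step, one DPM-Solver-2 step and one
    # DPM-Solver-1 step, for exactly 6 function evaluations in total.
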
    def denoise_to_zero_fn(self, x, s):
        """
        Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
        """
        return self.data_prediction_fn(x, s)
    def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
        """
        DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s`.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        h = lambda_t - lambda_s
        log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        if self.predict_x0:
            phi_1 = torch.expm1(-h)
            if model_s is None:
                model_s = self.model_fn(x, s)
            x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
            )
            if return_intermediate:
                return x_t, {'model_s': model_s}
            else:
                return x_t
        else:
            phi_1 = torch.expm1(h)
            if model_s is None:
                model_s = self.model_fn(x, s)
            x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
            )
            if return_intermediate:
                return x_t, {'model_s': model_s}
            else:
                return x_t
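    # For the noise prediction model, `dpm_solver_first_update` above implements
    #   x_t = (alpha_t / alpha_s) * x - sigma_t * (exp(h) - 1) * model_s,  with h = lambda_t - lambda_s,
    # which is the DDIM update written in the lambda (half-logSNR) parameterization.
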
    def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
                                            solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-2 from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the second-order solver.
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        if r1 is None:
            r1 = 0.5
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        s1 = ns.inverse_lambda(lambda_s1)
        log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
            s1), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
        alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)

        if self.predict_x0:
            phi_11 = torch.expm1(-r1 * h)
            phi_1 = torch.expm1(-h)

            if model_s is None:
                model_s = self.model_fn(x, s)
            x_s1 = (
                    expand_dims(sigma_s1 / sigma_s, dims) * x
                    - expand_dims(alpha_s1 * phi_11, dims) * model_s
            )
            model_s1 = self.model_fn(x_s1, s1)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
                                model_s1 - model_s)
                )
        else:
            phi_11 = torch.expm1(r1 * h)
            phi_1 = torch.expm1(h)

            if model_s is None:
                model_s = self.model_fn(x, s)
            x_s1 = (
                    expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                    - expand_dims(sigma_s1 * phi_11, dims) * model_s
            )
            model_s1 = self.model_fn(x_s1, s1)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
                )
        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1}
        else:
            return x_t
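    # Sanity check for `singlestep_dpm_solver_second_update`: with r1 = 0.5 and
    # solver_type == 'dpm_solver', the noise-prediction branch collapses to
    #   x_t = (alpha_t / alpha_s) * x - sigma_t * (exp(h) - 1) * model_s1,
    # i.e. a midpoint-style second-order step evaluated at lambda_s + h / 2.
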
    def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
                                           return_intermediate=False, solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-3 from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
                If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        if r1 is None:
            r1 = 1. / 3.
        if r2 is None:
            r2 = 2. / 3.
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        lambda_s2 = lambda_s + r2 * h
        s1 = ns.inverse_lambda(lambda_s1)
        s2 = ns.inverse_lambda(lambda_s2)
        log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
            s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
            s2), ns.marginal_std(t)
        alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)

        if self.predict_x0:
            phi_11 = torch.expm1(-r1 * h)
            phi_12 = torch.expm1(-r2 * h)
            phi_1 = torch.expm1(-h)
            phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
            phi_2 = phi_1 / h + 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                x_s1 = (
                        expand_dims(sigma_s1 / sigma_s, dims) * x
                        - expand_dims(alpha_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            x_s2 = (
                    expand_dims(sigma_s2 / sigma_s, dims) * x
                    - expand_dims(alpha_s2 * phi_12, dims) * model_s
                    + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        + expand_dims(alpha_t * phi_2, dims) * D1
                        - expand_dims(alpha_t * phi_3, dims) * D2
                )
        else:
            phi_11 = torch.expm1(r1 * h)
            phi_12 = torch.expm1(r2 * h)
            phi_1 = torch.expm1(h)
            phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
            phi_2 = phi_1 / h - 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                x_s1 = (
                        expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                        - expand_dims(sigma_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            x_s2 = (
                    expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
                    - expand_dims(sigma_s2 * phi_12, dims) * model_s
                    - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - expand_dims(sigma_t * phi_2, dims) * D1
                        - expand_dims(sigma_t * phi_3, dims) * D2
                )

        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
        else:
            return x_t
    def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
        """
        Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
            model_prev_list: A list of pytorch tensors. The previous computed model values.
            t_prev_list: A list of pytorch tensors. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        ns = self.noise_schedule
        dims = x.dim()
        model_prev_1, model_prev_0 = model_prev_list
        t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
            t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0 = h_0 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        if self.predict_x0:
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(sigma_t / sigma_prev_0, dims) * x
                        - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                        - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(sigma_t / sigma_prev_0, dims) * x
                        - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                        + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
                )
        else:
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                        - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                        - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                        - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                        - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
                )
        return x_t
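    # In the predict_x0 / 'dpm_solver' branch of `multistep_dpm_solver_second_update` above,
    # the update can be read as
    #   x_t = (sigma_t / sigma_prev_0) * x - alpha_t * (exp(-h) - 1) * (model_prev_0 + 0.5 * D1_0),
    # where D1_0 is a finite-difference estimate of the derivative of the model output w.r.t. lambda.
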
    def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
        """
        Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
            model_prev_list: A list of pytorch tensors. The previous computed model values.
            t_prev_list: A list of pytorch tensors. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        dims = x.dim()
        model_prev_2, model_prev_1, model_prev_0 = model_prev_list
        t_prev_2, t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
            t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        h_1 = lambda_prev_1 - lambda_prev_2
        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0, r1 = h_0 / h, h_1 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
        D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
        D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
        if self.predict_x0:
            x_t = (
                    expand_dims(sigma_t / sigma_prev_0, dims) * x
                    - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                    + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
                    - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
            )
        else:
            x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                    - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                    - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
                    - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
            )
        return x_t
    def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
                                     r2=None):
        """
        Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
            r1: A `float`. The hyperparameter of the second-order or third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if order == 1:
            return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
        elif order == 2:
            return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
                                                            solver_type=solver_type, r1=r1)
        elif order == 3:
            return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
                                                           solver_type=solver_type, r1=r1, r2=r2)
        else:
            raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
    def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
        """
        Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
            model_prev_list: A list of pytorch tensors. The previous computed model values.
            t_prev_list: A list of pytorch tensors. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if order == 1:
            return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
        elif order == 2:
            return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
        elif order == 3:
            return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
        else:
            raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
    def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
                            solver_type='dpm_solver'):
        """
        The adaptive step size solver based on singlestep DPM-Solver.
        Args:
            x: A pytorch tensor. The initial value at time `t_T`.
            order: An `int`. The (higher) order of the solver. We only support order == 2 or 3.
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            h_init: A `float`. The initial step size (for logSNR).
            atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, following [1].
            rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
            theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, following [1].
            t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
                current time and `t_0` is less than `t_err`. The default setting is 1e-5.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
        Returns:
            x_0: A pytorch tensor. The approximated solution at time `t_0`.
        [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
        """
        ns = self.noise_schedule
        s = t_T * torch.ones((x.shape[0],)).to(x)
        lambda_s = ns.marginal_lambda(s)
        lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
        h = h_init * torch.ones_like(s).to(x)
        x_prev = x
        nfe = 0
        if order == 2:
            r1 = 0.5
            lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                               solver_type=solver_type,
                                                                                               **kwargs)
        elif order == 3:
            r1, r2 = 1. / 3., 2. / 3.
            lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                    return_intermediate=True,
                                                                                    solver_type=solver_type)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
                                                                                              solver_type=solver_type,
                                                                                              **kwargs)
        else:
            raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
        while torch.abs((s - t_0)).mean() > t_err:
            t = ns.inverse_lambda(lambda_s + h)
            x_lower, lower_noise_kwargs = lower_update(x, s, t)
            x_higher = higher_update(x, s, t, **lower_noise_kwargs)
            delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
            norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
            E = norm_fn((x_higher - x_lower) / delta).max()
            if torch.all(E <= 1.):
                x = x_higher
                s = t
                x_prev = x_lower
                lambda_s = ns.marginal_lambda(s)
            h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
            nfe += order
        print('adaptive solver nfe', nfe)
        return x
    def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
               method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
               atol=0.0078, rtol=0.05,
               ):
        """
        Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
        =====================================================
        We support the following algorithms for both noise prediction model and data prediction model:
            - 'singlestep':
                Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
                We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
                The total number of function evaluations (NFE) == `steps`.
                Given a fixed NFE == `steps`, the sampling procedure is:
                    - If `order` == 1:
                        - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
                    - If `order` == 2:
                        - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
                        - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
                        - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
                    - If `order` == 3:
                        - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
                        - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
                        - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
                        - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
            - 'multistep':
                Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
                We initialize the first `order` values by lower order multistep solvers.
                Given a fixed NFE == `steps`, the sampling procedure is:
                    Denote K = steps.
                    - If `order` == 1:
                        - We use K steps of DPM-Solver-1 (i.e. DDIM).
                    - If `order` == 2:
                        - We first use 1 step of DPM-Solver-1, then use (K - 1) steps of multistep DPM-Solver-2.
                    - If `order` == 3:
                        - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
            - 'singlestep_fixed':
                Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
                We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
            - 'adaptive':
                Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
                We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
                You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
                (NFE) and the sample quality.
                    - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
                    - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
        =====================================================
        Some advice for choosing the algorithm:
            - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
                Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
                e.g.
                >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
                >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
                        skip_type='time_uniform', method='singlestep')
            - For **guided sampling with large guidance scale** by DPMs:
                Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
                e.g.
                >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
                >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
                        skip_type='time_uniform', method='multistep')
        We support three types of `skip_type`:
            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
            - 'time_quadratic': quadratic time for the time steps.
        =====================================================
        Args:
            x: A pytorch tensor. The initial value at time `t_start`
                e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
            steps: An `int`. The total number of function evaluations (NFE).
            t_start: A `float`. The starting time of the sampling.
                If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
            t_end: A `float`. The ending time of the sampling.
                If `t_end` is None, we use 1. / self.noise_schedule.total_N.
                e.g. if total_N == 1000, we have `t_end` == 1e-3.
                For discrete-time DPMs:
                    - We recommend `t_end` == 1. / self.noise_schedule.total_N.
                For continuous-time DPMs:
                    - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
            order: An `int`. The order of DPM-Solver.
            skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
            method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
                Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
                This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
                score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID of
                diffusion models sampled by diffusion SDEs on low-resolution images
                (such as CIFAR-10). However, we observed that this trick does not matter for
                high-resolution images. As it needs an additional NFE, we do not recommend
                it for high-resolution images.
            lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
                Only valid for `method=multistep` and `steps < 15`. We empirically find that
                this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
                (especially for steps <= 10). So we recommend setting it to `True`.
            solver_type: A `str`. The Taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
            atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
            rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
        Returns:
            x_end: A pytorch tensor. The approximated solution at time `t_end`.
        """
        t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        t_T = self.noise_schedule.T if t_start is None else t_start
        device = x.device
        if method == 'adaptive':
            with torch.no_grad():
                x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
                                             solver_type=solver_type)
        elif method == 'multistep':
            assert steps >= order
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            assert timesteps.shape[0] - 1 == steps
            with torch.no_grad():
                vec_t = timesteps[0].expand((x.shape[0]))
                model_prev_list = [self.model_fn(x, vec_t)]
                t_prev_list = [vec_t]
                # Init the first `order` values by lower order multistep DPM-Solver.
                for init_order in tqdm(range(1, order), desc="DPM init order"):
                    vec_t = timesteps[init_order].expand(x.shape[0])
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
                                                         solver_type=solver_type)
                    model_prev_list.append(self.model_fn(x, vec_t))
                    t_prev_list.append(vec_t)
                # Compute the remaining values by `order`-th order multistep DPM-Solver.
                for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
                    vec_t = timesteps[step].expand(x.shape[0])
                    if lower_order_final and steps < 15:
                        step_order = min(order, steps + 1 - step)
                    else:
                        step_order = order
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
                                                         solver_type=solver_type)
                    for i in range(order - 1):
                        t_prev_list[i] = t_prev_list[i + 1]
                        model_prev_list[i] = model_prev_list[i + 1]
                    t_prev_list[-1] = vec_t
                    # We do not need to evaluate the final model value.
                    if step < steps:
                        model_prev_list[-1] = self.model_fn(x, vec_t)
        elif method in ['singlestep', 'singlestep_fixed']:
            if method == 'singlestep':
                timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
                                                                                              skip_type=skip_type,
                                                                                              t_T=t_T, t_0=t_0,
                                                                                              device=device)
            elif method == 'singlestep_fixed':
                K = steps // order
                orders = [order, ] * K
                timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
            for i, order in enumerate(orders):
                t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
                timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
                                                      N=order, device=device)
                lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
                vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
                h = lambda_inner[-1] - lambda_inner[0]
                r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
                r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
                x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
        if denoise_to_zero:
            x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
        return x

#############################################################
# other utility functions
#############################################################

def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for the whole x-axis. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    N, K = x.shape[0], xp.shape[1]
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = x_idx - 1
    start_idx = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    start_idx2 = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
    return cand
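# Illustrative example for `interpolate_fn` (hypothetical values): with keypoints
# xp = torch.tensor([[0., 1.]]) and yp = torch.tensor([[0., 2.]]),
# interpolate_fn(torch.tensor([[0.5]]), xp, yp) returns tensor([[1.0]]).
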
def expand_dims(v, dims):
    """
    Expand the tensor `v` to the dim `dims`.
    Args:
        `v`: a PyTorch tensor with shape [N].
        `dims`: an `int`.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
    """
    return v[(...,) + (None,) * (dims - 1)]
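
# For example, expand_dims(v, 3) maps a tensor of shape [N] to shape [N, 1, 1],
# so that it broadcasts against batched multi-dimensional tensors.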
CCEdit-main/src/controlnet11/ldm/models/diffusion/dpm_solver/sampler.py
ADDED
@@ -0,0 +1,87 @@
"""SAMPLING ONLY."""
import torch

from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver


MODEL_TYPES = {
    "eps": "noise",
    "v": "v"
}


class DPMSolverSampler(object):
    def __init__(self, model, **kwargs):
        super().__init__()
        self.model = model
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))

    def register_buffer(self, name, attr):
        if type(attr) == torch.Tensor:
            if attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               **kwargs
               ):
        if conditioning is not None:
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)

        print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')

        device = self.model.betas.device
        if x_T is None:
            img = torch.randn(size, device=device)
        else:
            img = x_T

        ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)

        model_fn = model_wrapper(
            lambda x, t, c: self.model.apply_model(x, t, c),
            ns,
            model_type=MODEL_TYPES[self.model.parameterization],
            guidance_type="classifier-free",
            condition=conditioning,
            unconditional_condition=unconditional_conditioning,
            guidance_scale=unconditional_guidance_scale,
        )

        dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
        x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True)

        return x.to(device), None
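
# Illustrative call (argument values are hypothetical):
#   sampler = DPMSolverSampler(model)
#   samples, _ = sampler.sample(S=20, batch_size=4, shape=(4, 64, 64),
#                               conditioning=c, unconditional_conditioning=uc,
#                               unconditional_guidance_scale=7.5)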
CCEdit-main/src/controlnet11/ldm/models/diffusion/plms.py
ADDED
@@ -0,0 +1,244 @@
"""SAMPLING ONLY."""

import torch
import numpy as np
from tqdm import tqdm
from functools import partial

from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding


class PLMSSampler(object):
    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        if type(attr) == torch.Tensor:
            if attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        if ddim_eta != 0:
            raise ValueError('ddim_eta must be 0 for PLMS')
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                    1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               dynamic_threshold=None,
               **kwargs
               ):
        if conditioning is not None:
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for PLMS sampling is {size}')

        samples, intermediates = self.plms_sampling(conditioning, size,
                                                    callback=callback,
                                                    img_callback=img_callback,
                                                    quantize_denoised=quantize_x0,
                                                    mask=mask, x0=x0,
                                                    ddim_use_original_steps=False,
                                                    noise_dropout=noise_dropout,
                                                    temperature=temperature,
                                                    score_corrector=score_corrector,
                                                    corrector_kwargs=corrector_kwargs,
                                                    x_T=x_T,
                                                    log_every_t=log_every_t,
                                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                                    unconditional_conditioning=unconditional_conditioning,
                                                    dynamic_threshold=dynamic_threshold,
                                                    )
        return samples, intermediates

    @torch.no_grad()
    def plms_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None,
                      dynamic_threshold=None):
        device = self.model.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]

        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running PLMS Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
        old_eps = []

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)

            if mask is not None:
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img

            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      old_eps=old_eps, t_next=ts_next,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0, e_t = outs
            old_eps.append(e_t)
            if len(old_eps) >= 4:
                old_eps.pop(0)
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)

            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)

        return img, intermediates

    @torch.no_grad()
    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,
                      dynamic_threshold=None):
        b, *_, device = *x.shape, x.device

        def get_model_output(x, t):
            if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
                e_t = self.model.apply_model(x, t, c)
            else:
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t] * 2)
                c_in = torch.cat([unconditional_conditioning, c])
                e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
                e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

            if score_corrector is not None:
                assert self.model.parameterization == "eps"
                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

            return e_t

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas

        def get_x_prev_and_pred_x0(e_t, index):
            # select parameters corresponding to the currently considered timestep
            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)

            # current prediction for x_0
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
            if quantize_denoised:
                pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
            if dynamic_threshold is not None:
                pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
            # direction pointing to x_t
            dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
            if noise_dropout > 0.:
                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
            return x_prev, pred_x0

        e_t = get_model_output(x, t)
        if len(old_eps) == 0:
            # Pseudo Improved Euler (2nd order)
            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
            e_t_next = get_model_output(x_prev, t_next)
            e_t_prime = (e_t + e_t_next) / 2
        elif len(old_eps) == 1:
            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (3 * e_t - old_eps[-1]) / 2
        elif len(old_eps) == 2:
            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
        elif len(old_eps) >= 3:
            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24

        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)

        return x_prev, pred_x0, e_t
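The `old_eps` buffer above implements classical Adams-Bashforth linear multistep integration over the model's noise predictions. A quick consistency check on the coefficients used in `p_sample_plms` (illustrative, not part of the diff): each weight set must sum to 1.

# Adams-Bashforth weights as used in p_sample_plms; each row must sum to 1.
ab_weights = [
    [3 / 2, -1 / 2],                        # 2nd order
    [23 / 12, -16 / 12, 5 / 12],            # 3rd order
    [55 / 24, -59 / 24, 37 / 24, -9 / 24],  # 4th order
]
for w in ab_weights:
    assert abs(sum(w) - 1.0) < 1e-12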
CCEdit-main/src/controlnet11/ldm/models/diffusion/sampling_util.py
ADDED
@@ -0,0 +1,22 @@
import torch
import numpy as np


def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions.
    From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
    dims_to_append = target_dims - x.ndim
    if dims_to_append < 0:
        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
    return x[(...,) + (None,) * dims_to_append]


def norm_thresholding(x0, value):
    s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
    return x0 * (value / s)


def spatial_norm_thresholding(x0, value):
    # b c h w
    s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
    return x0 * (value / s)
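A quick illustration of `norm_thresholding` (not part of the diff): per-example RMS norms above `value` are rescaled down to exactly `value`, while examples already at or below it pass through unchanged (their clamped norm equals `value`, so the scale factor is 1).

# Illustrative check; assumes the module above is importable as in the repo layout.
import torch
from ldm.models.diffusion.sampling_util import norm_thresholding

x = torch.randn(2, 4, 8, 8) * 5.0          # RMS well above the threshold
y = norm_thresholding(x, value=1.0)
print(y.pow(2).flatten(1).mean(1).sqrt())  # both entries ~= 1.0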
CCEdit-main/src/controlnet11/ldm/modules/attention.py
ADDED
@@ -0,0 +1,341 @@
from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any

from ldm.modules.diffusionmodules.util import checkpoint


try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except ImportError:
    XFORMERS_IS_AVAILBLE = False

# CrossAttn precision handling
import os
_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")


def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            nn.Linear(dim, inner_dim),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim_out)
        )

    def forward(self, x):
        return self.net(x)


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)


class SpatialSelfAttention(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = rearrange(q, 'b c h w -> b (h w) c')
        k = rearrange(k, 'b c h w -> b c (h w)')
        w_ = torch.einsum('bij,bjk->bik', q, k)

        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = rearrange(v, 'b c h w -> b c (h w)')
        w_ = rearrange(w_, 'b i j -> b j i')
        h_ = torch.einsum('bij,bjk->bik', v, w_)
        h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
        h_ = self.proj_out(h_)

        return x + h_


class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        h = self.heads

        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        # force cast to fp32 to avoid overflowing
        if _ATTN_PRECISION == "fp32":
            with torch.autocast(enabled=False, device_type='cuda'):
                q, k = q.float(), k.float()
                sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        else:
            sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        del q, k

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        sim = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', sim, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)


class MemoryEfficientCrossAttention(nn.Module):
    # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
              f"{heads} heads.")
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        b, _, _ = q.shape
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        # actually compute the attention, what we cannot get enough of
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)

        if exists(mask):
            raise NotImplementedError
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)


class BasicTransformerBlock(nn.Module):
    ATTENTION_MODES = {
        "softmax": CrossAttention,  # vanilla attention
        "softmax-xformers": MemoryEfficientCrossAttention
    }

    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
                 disable_self_attn=False):
        super().__init__()
        attn_mode = "softmax-xformers" if XFORMERS_IS_AVAILBLE else "softmax"
        assert attn_mode in self.ATTENTION_MODES
        attn_cls = self.ATTENTION_MODES[attn_mode]
        self.disable_self_attn = disable_self_attn
        self.attn1 = attn_cls(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.attn2 = attn_cls(query_dim=dim, context_dim=context_dim,
                              heads=n_heads, dim_head=d_head, dropout=dropout)  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint

    def forward(self, x, context=None):
        return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None):
        x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
        x = self.attn2(self.norm2(x), context=context) + x
        x = self.ff(self.norm3(x)) + x
        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim]
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels)
        if not use_linear:
            self.proj_in = nn.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0)
        else:
            self.proj_in = nn.Linear(in_channels, inner_dim)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = zero_module(nn.Conv2d(inner_dim,
                                                  in_channels,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0))
        else:
            # bug fix: the linear projection must map inner_dim back to in_channels
            # (the original had the arguments swapped, which breaks whenever inner_dim != in_channels)
            self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
        self.use_linear = use_linear

    def forward(self, x, context=None):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context]
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            x = block(x, context=context[i])
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in
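A small shape sketch for the block above (illustrative, not part of the diff): `SpatialTransformer` flattens a `b c h w` feature map into tokens, runs `depth` `BasicTransformerBlock`s whose `attn2` cross-attends to the text context, then projects back and adds the residual, so the output shape matches the input. The dimensions below are hypothetical, chosen to mirror typical SD UNet settings.

# Hypothetical shape check; assumes the module above is importable as in the repo layout.
import torch
from ldm.modules.attention import SpatialTransformer

st = SpatialTransformer(in_channels=320, n_heads=8, d_head=40, depth=1, context_dim=768)
x = torch.randn(2, 320, 16, 16)   # image-like features: b c h w
ctx = torch.randn(2, 77, 768)     # e.g. CLIP text embeddings: b t d
out = st(x, context=ctx)
print(out.shape)                  # torch.Size([2, 320, 16, 16])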
CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/__init__.py
ADDED
File without changes
CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/model.py
ADDED
@@ -0,0 +1,852 @@
# pytorch_diffusion + derived encoder decoder
import math
import torch
import torch.nn as nn
import numpy as np
from einops import rearrange
from typing import Optional, Any

from ldm.modules.attention import MemoryEfficientCrossAttention

try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except ImportError:
    XFORMERS_IS_AVAILBLE = False
    print("No module 'xformers'. Proceeding without it.")


def get_timestep_embedding(timesteps, embedding_dim):
    """
    This matches the implementation in Denoising Diffusion Probabilistic Models:
    From Fairseq.
    Build sinusoidal embeddings.
    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
    emb = emb.to(device=timesteps.device)
    emb = timesteps.float()[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb


def nonlinearity(x):
    # swish
    return x * torch.sigmoid(x)


def Normalize(in_channels, num_groups=32):
    return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)


class Upsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        stride=2,
                                        padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = (0, 1, 0, 1)
            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


class ResnetBlock(nn.Module):
    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(in_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if temb_channels > 0:
            self.temb_proj = torch.nn.Linear(temb_channels,
                                             out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(in_channels,
                                                     out_channels,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv2d(in_channels,
                                                    out_channels,
                                                    kernel_size=1,
                                                    stride=1,
                                                    padding=0)

    def forward(self, x, temb):
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)

        if temb is not None:
            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]

        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x + h


class AttnBlock(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = q.reshape(b, c, h * w)
        q = q.permute(0, 2, 1)      # b,hw,c
        k = k.reshape(b, c, h * w)  # b,c,hw
        w_ = torch.bmm(q, k)        # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = v.reshape(b, c, h * w)
        w_ = w_.permute(0, 2, 1)    # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v, w_)       # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b, c, h, w)

        h_ = self.proj_out(h_)

        return x + h_


class MemoryEfficientAttnBlock(nn.Module):
    """
    Uses xformers efficient implementation,
    see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    Note: this is a single-head self-attention operation
    """
    #
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)
        self.attention_op: Optional[Any] = None

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        B, C, H, W = q.shape
        q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))

        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(B, t.shape[1], 1, C)
            .permute(0, 2, 1, 3)
            .reshape(B * 1, t.shape[1], C)
            .contiguous(),
            (q, k, v),
        )
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)

        out = (
            out.unsqueeze(0)
            .reshape(B, 1, out.shape[1], C)
            .permute(0, 2, 1, 3)
            .reshape(B, out.shape[1], C)
        )
        out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
        out = self.proj_out(out)
        return x + out


class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
    def forward(self, x, context=None, mask=None):
        b, c, h, w = x.shape
        x_in = x  # bug fix: keep the unflattened input for the residual; the original added
        # the flattened `x` (b, hw, c) to the reshaped `out` (b, c, h, w), which cannot broadcast
        x = rearrange(x, 'b c h w -> b (h w) c')
        out = super().forward(x, context=context, mask=mask)
        out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w, c=c)
        return x_in + out


def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
    assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
    if XFORMERS_IS_AVAILBLE and attn_type == "vanilla":
        attn_type = "vanilla-xformers"
    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "vanilla":
        assert attn_kwargs is None
        return AttnBlock(in_channels)
    elif attn_type == "vanilla-xformers":
        print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
        return MemoryEfficientAttnBlock(in_channels)
    elif attn_type == "memory-efficient-cross-attn":  # bug fix: was `type == ...`, which compared the builtin and never matched
        attn_kwargs["query_dim"] = in_channels
        return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
    elif attn_type == "none":
        return nn.Identity(in_channels)
    else:
        raise NotImplementedError()


class Model(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = self.ch*4
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList([
                torch.nn.Linear(self.ch,
                                self.temb_ch),
                torch.nn.Linear(self.temb_ch,
                                self.temb_ch),
            ])

        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            skip_in = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                if i_block == self.num_res_blocks:
                    skip_in = ch*in_ch_mult[i_level]
                block.append(ResnetBlock(in_channels=block_in+skip_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x, t=None, context=None):
        #assert x.shape[2] == x.shape[3] == self.resolution
        if context is not None:
            # assume aligned context, cat along channel axis
            x = torch.cat((x, context), dim=1)
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h

    def get_last_layer(self):
        return self.conv_out.weight


class Encoder(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
                 **ignore_kwargs):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        2*z_channels if double_z else z_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        # timestep embedding
        temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h


class Decoder(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
                 attn_type="vanilla", **ignorekwargs):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out

        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,)+tuple(ch_mult)
        block_in = ch*ch_mult[self.num_resolutions-1]
        curr_res = resolution // 2**(self.num_resolutions-1)
        self.z_shape = (1,z_channels,curr_res,curr_res)
        print("Working with z of shape {} = {} dimensions.".format(
            self.z_shape, np.prod(self.z_shape)))

        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels,
                                       block_in,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, z):
        #assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # timestep embedding
        temb = None

        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        if self.tanh_out:
            h = torch.tanh(h)
        return h


class SimpleDecoder(nn.Module):
    def __init__(self, in_channels, out_channels, *args, **kwargs):
        super().__init__()
        self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
                                    ResnetBlock(in_channels=in_channels,
                                                out_channels=2 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    ResnetBlock(in_channels=2 * in_channels,
                                                out_channels=4 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    ResnetBlock(in_channels=4 * in_channels,
                                                out_channels=2 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    nn.Conv2d(2*in_channels, in_channels, 1),
                                    Upsample(in_channels, with_conv=True)])
        # end
        self.norm_out = Normalize(in_channels)
        self.conv_out = torch.nn.Conv2d(in_channels,
                                        out_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        for i, layer in enumerate(self.model):
            if i in [1, 2, 3]:
                x = layer(x, None)
            else:
                x = layer(x)

        h = self.norm_out(x)
        h = nonlinearity(h)
        x = self.conv_out(h)
        return x


class UpsampleDecoder(nn.Module):
    def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
                 ch_mult=(2,2), dropout=0.0):
        super().__init__()
        # upsampling
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        block_in = in_channels
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.res_blocks = nn.ModuleList()
        self.upsample_blocks = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            res_block = []
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                res_block.append(ResnetBlock(in_channels=block_in,
                                             out_channels=block_out,
                                             temb_channels=self.temb_ch,
                                             dropout=dropout))
                block_in = block_out
            self.res_blocks.append(nn.ModuleList(res_block))
            if i_level != self.num_resolutions - 1:
                self.upsample_blocks.append(Upsample(block_in, True))
                curr_res = curr_res * 2

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_channels,
                                        kernel_size=3,
+
stride=1,
|
| 723 |
+
padding=1)
|
| 724 |
+
|
| 725 |
+
def forward(self, x):
|
| 726 |
+
# upsampling
|
| 727 |
+
h = x
|
| 728 |
+
for k, i_level in enumerate(range(self.num_resolutions)):
|
| 729 |
+
for i_block in range(self.num_res_blocks + 1):
|
| 730 |
+
h = self.res_blocks[i_level][i_block](h, None)
|
| 731 |
+
if i_level != self.num_resolutions - 1:
|
| 732 |
+
h = self.upsample_blocks[k](h)
|
| 733 |
+
h = self.norm_out(h)
|
| 734 |
+
h = nonlinearity(h)
|
| 735 |
+
h = self.conv_out(h)
|
| 736 |
+
return h
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
class LatentRescaler(nn.Module):
|
| 740 |
+
def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
|
| 741 |
+
super().__init__()
|
| 742 |
+
# residual block, interpolate, residual block
|
| 743 |
+
self.factor = factor
|
| 744 |
+
self.conv_in = nn.Conv2d(in_channels,
|
| 745 |
+
mid_channels,
|
| 746 |
+
kernel_size=3,
|
| 747 |
+
stride=1,
|
| 748 |
+
padding=1)
|
| 749 |
+
self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
|
| 750 |
+
out_channels=mid_channels,
|
| 751 |
+
temb_channels=0,
|
| 752 |
+
dropout=0.0) for _ in range(depth)])
|
| 753 |
+
self.attn = AttnBlock(mid_channels)
|
| 754 |
+
self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
|
| 755 |
+
out_channels=mid_channels,
|
| 756 |
+
temb_channels=0,
|
| 757 |
+
dropout=0.0) for _ in range(depth)])
|
| 758 |
+
|
| 759 |
+
self.conv_out = nn.Conv2d(mid_channels,
|
| 760 |
+
out_channels,
|
| 761 |
+
kernel_size=1,
|
| 762 |
+
)
|
| 763 |
+
|
| 764 |
+
def forward(self, x):
|
| 765 |
+
x = self.conv_in(x)
|
| 766 |
+
for block in self.res_block1:
|
| 767 |
+
x = block(x, None)
|
| 768 |
+
x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
|
| 769 |
+
x = self.attn(x)
|
| 770 |
+
for block in self.res_block2:
|
| 771 |
+
x = block(x, None)
|
| 772 |
+
x = self.conv_out(x)
|
| 773 |
+
return x
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
class MergedRescaleEncoder(nn.Module):
|
| 777 |
+
def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
|
| 778 |
+
attn_resolutions, dropout=0.0, resamp_with_conv=True,
|
| 779 |
+
ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
|
| 780 |
+
super().__init__()
|
| 781 |
+
intermediate_chn = ch * ch_mult[-1]
|
| 782 |
+
self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
|
| 783 |
+
z_channels=intermediate_chn, double_z=False, resolution=resolution,
|
| 784 |
+
attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
|
| 785 |
+
out_ch=None)
|
| 786 |
+
self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
|
| 787 |
+
mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
|
| 788 |
+
|
| 789 |
+
def forward(self, x):
|
| 790 |
+
x = self.encoder(x)
|
| 791 |
+
x = self.rescaler(x)
|
| 792 |
+
return x
|
| 793 |
+
|
| 794 |
+
|
| 795 |
+
class MergedRescaleDecoder(nn.Module):
|
| 796 |
+
def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
|
| 797 |
+
dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
|
| 798 |
+
super().__init__()
|
| 799 |
+
tmp_chn = z_channels*ch_mult[-1]
|
| 800 |
+
self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
|
| 801 |
+
resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
|
| 802 |
+
ch_mult=ch_mult, resolution=resolution, ch=ch)
|
| 803 |
+
self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
|
| 804 |
+
out_channels=tmp_chn, depth=rescale_module_depth)
|
| 805 |
+
|
| 806 |
+
def forward(self, x):
|
| 807 |
+
x = self.rescaler(x)
|
| 808 |
+
x = self.decoder(x)
|
| 809 |
+
return x
|
| 810 |
+
|
| 811 |
+
|
| 812 |
+
class Upsampler(nn.Module):
|
| 813 |
+
def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
|
| 814 |
+
super().__init__()
|
| 815 |
+
assert out_size >= in_size
|
| 816 |
+
num_blocks = int(np.log2(out_size//in_size))+1
|
| 817 |
+
factor_up = 1.+ (out_size % in_size)
|
| 818 |
+
print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
|
| 819 |
+
self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
|
| 820 |
+
out_channels=in_channels)
|
| 821 |
+
self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
|
| 822 |
+
attn_resolutions=[], in_channels=None, ch=in_channels,
|
| 823 |
+
ch_mult=[ch_mult for _ in range(num_blocks)])
|
| 824 |
+
|
| 825 |
+
def forward(self, x):
|
| 826 |
+
x = self.rescaler(x)
|
| 827 |
+
x = self.decoder(x)
|
| 828 |
+
return x
|
| 829 |
+
|
| 830 |
+
|
class Resize(nn.Module):
    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
        super().__init__()
        self.with_conv = learned
        self.mode = mode
        if self.with_conv:
            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
            raise NotImplementedError()
            assert in_channels is not None
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)

    def forward(self, x, scale_factor=1.0):
        if scale_factor==1.0:
            return x
        else:
            x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
        return x
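
A quick orientation note on the Decoder defined above: it mirrors the Encoder, doubling spatial resolution at every level except the lowest. A minimal usage sketch, assuming the names defined earlier in this file (ResnetBlock, make_attn, Normalize, Upsample, nonlinearity) are in scope; the shapes below are illustrative assumptions, not values taken from this repo:

    import torch

    # with 3 levels, the latent sits at resolution // 2**2 = 64
    dec = Decoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
                  attn_resolutions=[], in_channels=3, resolution=256,
                  z_channels=4)
    z = torch.randn(1, 4, 64, 64)   # (B, z_channels, curr_res, curr_res)
    img = dec(z)                    # -> (1, 3, 256, 256)
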
CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/openaimodel.py
ADDED
@@ -0,0 +1,786 @@
from abc import abstractmethod
import math

import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F

from ldm.modules.diffusionmodules.util import (
    checkpoint,
    conv_nd,
    linear,
    avg_pool_nd,
    zero_module,
    normalization,
    timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.util import exists


# dummy replace
def convert_module_to_f16(x):
    pass

def convert_module_to_f32(x):
    pass


## go
class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c, *_spatial = x.shape
        x = x.reshape(b, c, -1)  # NC(HW)
        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
        x = self.qkv_proj(x)
        x = self.attention(x)
        x = self.c_proj(x)
        return x[:, :, 0]


class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """


class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.
    """

    def forward(self, x, emb, context=None):
        for layer in self:
            if isinstance(layer, TimestepBlock):
                x = layer(x, emb)
            elif isinstance(layer, SpatialTransformer):
                x = layer(x, context)
            else:
                x = layer(x)
        return x


class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            x = F.interpolate(
                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
            )
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        if self.use_conv:
            x = self.conv(x)
        return x

class TransposedUpsample(nn.Module):
    'Learned 2x upsampling without padding'
    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels

        self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)

    def forward(self,x):
        return self.up(x)


class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)


class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )


    def _forward(self, x, emb):
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h


class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        return checkpoint(self._forward, (x,), self.parameters(), True)   # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
        #return pt_checkpoint(self._forward, x)  # pytorch

    def _forward(self, x):
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)


def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    b, c, *spatial = y[0].shape
    num_spatial = int(np.prod(spatial))
    # We perform two matmuls with the same number of ops.
    # The first computes the weight matrix, the second computes
    # the combination of the value vectors.
    matmul_ops = 2 * b * (num_spatial ** 2) * c
    model.total_ops += th.DoubleTensor([matmul_ops])

class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum(
            "bct,bcs->bts", q * scale, k * scale
        )  # More stable with f16 than dividing afterwards
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum("bts,bcs->bct", weight, v)
        return a.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)


class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(bs * self.n_heads, ch, length),
            (k * scale).view(bs * self.n_heads, ch, length),
        )  # More stable with f16 than dividing afterwards
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
        return a.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)


class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
                              a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
                               of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
                                    increased efficiency.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,    # custom transformer support
        transformer_depth=1,              # custom transformer support
        context_dim=None,                 # custom transformer support
        n_embed=None,                     # custom support for prediction of discrete ids into codebook of first stage vq model
        legacy=True,
        disable_self_attentions=None,
        num_attention_blocks=None,
        disable_middle_self_attn=False,
        use_linear_in_transformer=False,
    ):
        super().__init__()
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError("provide num_res_blocks either as an int (globally constant) or "
                                 "as a list/tuple (per-level) with the same length as channel_mult")
            self.num_res_blocks = num_res_blocks
        if disable_self_attentions is not None:
            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
            assert len(disable_self_attentions) == len(channel_mult)
        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
            print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                  f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
                  f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
                  f"attention will still not be set.")

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            if isinstance(self.num_classes, int):
                self.label_emb = nn.Embedding(num_classes, time_embed_dim)
            elif self.num_classes == "continuous":
                print("setting up linear c_adm embedding layer")
                self.label_emb = nn.Linear(1, time_embed_dim)
            else:
                raise ValueError()

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            ) if not use_spatial_transformer else SpatialTransformer(
                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint
                            )
                        )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            #num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                use_checkpoint=use_checkpoint
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(self.num_res_blocks[level] + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads_upsample,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            ) if not use_spatial_transformer else SpatialTransformer(
                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint
                            )
                        )
                if level and i == self.num_res_blocks[level]:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                normalization(ch),
                conv_nd(dims, model_channels, n_embed, 1),
                #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
            )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
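
For orientation, a minimal sketch of driving the UNetModel above as a plain (unconditional, no spatial transformer) denoiser; the sizes are illustrative assumptions chosen for the example, not values taken from this repo:

    import torch as th

    unet = UNetModel(image_size=32, in_channels=4, model_channels=32,
                     out_channels=4, num_res_blocks=1,
                     attention_resolutions=(4,), channel_mult=(1, 2, 4),
                     num_heads=4)
    x = th.randn(2, 4, 32, 32)      # noisy latents
    t = th.randint(0, 1000, (2,))   # one timestep per batch element
    eps = unet(x, timesteps=t)      # predicted noise, shape (2, 4, 32, 32)
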
CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/upscaling.py
ADDED
@@ -0,0 +1,81 @@
import torch
import torch.nn as nn
import numpy as np
from functools import partial

from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule
from ldm.util import default


class AbstractLowScaleModel(nn.Module):
    # for concatenating a downsampled image to the latent representation
    def __init__(self, noise_schedule_config=None):
        super(AbstractLowScaleModel, self).__init__()
        if noise_schedule_config is not None:
            self.register_schedule(**noise_schedule_config)

    def register_schedule(self, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
                                   cosine_s=cosine_s)
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

    def q_sample(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)

    def forward(self, x):
        return x, None

    def decode(self, x):
        return x


class SimpleImageConcat(AbstractLowScaleModel):
    # no noise level conditioning
    def __init__(self):
        super(SimpleImageConcat, self).__init__(noise_schedule_config=None)
        self.max_noise_level = 0

    def forward(self, x):
        # fix to constant noise level
        return x, torch.zeros(x.shape[0], device=x.device).long()


class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel):
    def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False):
        super().__init__(noise_schedule_config=noise_schedule_config)
        self.max_noise_level = max_noise_level

    def forward(self, x, noise_level=None):
        if noise_level is None:
            noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
        else:
            assert isinstance(noise_level, torch.Tensor)
        z = self.q_sample(x, noise_level)
        return z, noise_level

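
For orientation, a minimal sketch of the noise augmentation above; the schedule config is an illustrative assumption (its keys are the keyword arguments of register_schedule):

    import torch

    aug = ImageConcatWithNoiseAugmentation(
        noise_schedule_config={"beta_schedule": "linear", "timesteps": 1000},
        max_noise_level=350)
    x_low = torch.randn(2, 3, 64, 64)   # low-res conditioning image
    z, noise_level = aug(x_low)         # x_low diffused to a random level < 350
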
CCEdit-main/src/controlnet11/ldm/modules/diffusionmodules/util.py
ADDED
@@ -0,0 +1,270 @@
| 1 |
+
# adopted from
|
| 2 |
+
# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
|
| 3 |
+
# and
|
| 4 |
+
# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
|
| 5 |
+
# and
|
| 6 |
+
# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
|
| 7 |
+
#
|
| 8 |
+
# thanks!
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
import os
|
| 12 |
+
import math
|
| 13 |
+
import torch
|
| 14 |
+
import torch.nn as nn
|
| 15 |
+
import numpy as np
|
| 16 |
+
from einops import repeat
|
| 17 |
+
|
| 18 |
+
from ldm.util import instantiate_from_config
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
|
| 22 |
+
if schedule == "linear":
|
| 23 |
+
betas = (
|
| 24 |
+
torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
elif schedule == "cosine":
|
| 28 |
+
timesteps = (
|
| 29 |
+
torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
|
| 30 |
+
)
|
| 31 |
+
alphas = timesteps / (1 + cosine_s) * np.pi / 2
|
| 32 |
+
alphas = torch.cos(alphas).pow(2)
|
| 33 |
+
alphas = alphas / alphas[0]
|
| 34 |
+
betas = 1 - alphas[1:] / alphas[:-1]
|
| 35 |
+
betas = np.clip(betas, a_min=0, a_max=0.999)
|
| 36 |
+
|
| 37 |
+
elif schedule == "sqrt_linear":
|
| 38 |
+
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
|
| 39 |
+
elif schedule == "sqrt":
|
| 40 |
+
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
|
| 41 |
+
else:
|
| 42 |
+
raise ValueError(f"schedule '{schedule}' unknown.")
|
| 43 |
+
return betas.numpy()
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
|
| 47 |
+
if ddim_discr_method == 'uniform':
|
| 48 |
+
c = num_ddpm_timesteps // num_ddim_timesteps
|
| 49 |
+
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
|
| 50 |
+
elif ddim_discr_method == 'quad':
|
| 51 |
+
ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
|
| 52 |
+
else:
|
| 53 |
+
raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
|
| 54 |
+
|
| 55 |
+
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
|
| 56 |
+
# add one to get the final alpha values right (the ones from first scale to data during sampling)
|
| 57 |
+
steps_out = ddim_timesteps + 1
|
| 58 |
+
if verbose:
|
| 59 |
+
print(f'Selected timesteps for ddim sampler: {steps_out}')
|
| 60 |
+
return steps_out
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
|
| 64 |
+
# select alphas for computing the variance schedule
|
| 65 |
+
alphas = alphacums[ddim_timesteps]
|
| 66 |
+
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
|
| 67 |
+
|
| 68 |
+
# according the the formula provided in https://arxiv.org/abs/2010.02502
|
| 69 |
+
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
|
| 70 |
+
if verbose:
|
| 71 |
+
print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
|
| 72 |
+
print(f'For the chosen value of eta, which is {eta}, '
|
| 73 |
+
f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
|
| 74 |
+
return sigmas, alphas, alphas_prev
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
|
| 78 |
+
"""
|
| 79 |
+
Create a beta schedule that discretizes the given alpha_t_bar function,
|
| 80 |
+
which defines the cumulative product of (1-beta) over time from t = [0,1].
|
| 81 |
+
:param num_diffusion_timesteps: the number of betas to produce.
|
| 82 |
+
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
|
| 83 |
+
produces the cumulative product of (1-beta) up to that
|
| 84 |
+
part of the diffusion process.
|
| 85 |
+
:param max_beta: the maximum beta to use; use values lower than 1 to
|
| 86 |
+
prevent singularities.
|
| 87 |
+
"""
|
| 88 |
+
betas = []
|
| 89 |
+
for i in range(num_diffusion_timesteps):
|
| 90 |
+
t1 = i / num_diffusion_timesteps
|
| 91 |
+
t2 = (i + 1) / num_diffusion_timesteps
|
| 92 |
+
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
|
| 93 |
+
return np.array(betas)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def extract_into_tensor(a, t, x_shape):
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

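This gathers one schedule coefficient per batch element and reshapes it to broadcast against an image batch; a minimal sketch:

import torch

coeffs = torch.linspace(0.1, 1.0, steps=1000)   # e.g. a per-timestep coefficient table
t = torch.tensor([0, 499, 999])                 # one timestep index per batch element
x = torch.randn(3, 4, 64, 64)
c = extract_into_tensor(coeffs, t, x.shape)     # shape (3, 1, 1, 1)
scaled = c * x                                  # broadcasts over C, H, W
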
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if flag:
        args = tuple(inputs) + tuple(params)
        return CheckpointFunction.apply(func, len(inputs), *args)
    else:
        return func(*inputs)


class CheckpointFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        ctx.gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(),
                                   "dtype": torch.get_autocast_gpu_dtype(),
                                   "cache_enabled": torch.is_autocast_cache_enabled()}
        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad(), \
                torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        return (None, None) + input_grads

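In the upstream UNet blocks this pair is invoked roughly as `checkpoint(self._forward, (x, emb), self.parameters(), self.use_checkpoint)`. A self-contained toy analogue, assuming the `checkpoint`/`CheckpointFunction` definitions above are in scope:

import torch
import torch.nn as nn

layer = nn.Linear(8, 8)
x = torch.randn(2, 8, requires_grad=True)

# flag=True: the forward runs under no_grad and activations are recomputed
# during backward, trading extra compute for lower memory.
y = checkpoint(lambda inp: layer(inp).relu(), (x,), layer.parameters(), True)
y.sum().backward()
assert x.grad is not None and layer.weight.grad is not None
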
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=timesteps.device)
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    else:
        embedding = repeat(timesteps, 'b -> b d', d=dim)
    return embedding

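A shape-check sketch (the `repeat` in the else-branch is assumed to be `einops.repeat`, imported at the top of this file):

import torch

t = torch.tensor([0, 500, 999])
emb = timestep_embedding(t, dim=320)
assert emb.shape == (3, 320)
# t = 0 gives cos(0) = 1 in the first half and sin(0) = 0 in the second.
assert torch.allclose(emb[0, :160], torch.ones(160))
assert torch.allclose(emb[0, 160:], torch.zeros(160))
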
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def scale_module(module, scale):
    """
    Scale the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().mul_(scale)
    return module


def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))


def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(32, channels)


# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
    def forward(self, x):
        return x * torch.sigmoid(x)


class GroupNorm32(nn.GroupNorm):
    def forward(self, x):
        return super().forward(x.float()).type(x.dtype)


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def linear(*args, **kwargs):
    """
    Create a linear module.
    """
    return nn.Linear(*args, **kwargs)


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


class HybridConditioner(nn.Module):

    def __init__(self, c_concat_config, c_crossattn_config):
        super().__init__()
        self.concat_conditioner = instantiate_from_config(c_concat_config)
        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)

    def forward(self, c_concat, c_crossattn):
        c_concat = self.concat_conditioner(c_concat)
        c_crossattn = self.crossattn_conditioner(c_crossattn)
        return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}


def noise_like(shape, device, repeat=False):
    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
    noise = lambda: torch.randn(shape, device=device)
    return repeat_noise() if repeat else noise()

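`zero_module` plus `conv_nd` is the "zero convolution" idiom the ControlNet side of this repo builds on: a freshly added control branch starts by emitting exact zeros, so the pretrained backbone is untouched at step 0. A sketch under that assumption:

import torch

# 1x1 conv with zeroed weight and bias: contributes nothing until trained.
zero_conv = zero_module(conv_nd(2, 320, 320, 1, padding=0))
h = torch.randn(1, 320, 64, 64)
assert torch.allclose(zero_conv(h), torch.zeros_like(h))
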
CCEdit-main/src/controlnet11/ldm/modules/distributions/__init__.py
ADDED
File without changes

CCEdit-main/src/controlnet11/ldm/modules/distributions/distributions.py
ADDED
@@ -0,0 +1,92 @@
import torch
import numpy as np


class AbstractDistribution:
    def sample(self):
        raise NotImplementedError()

    def mode(self):
        raise NotImplementedError()


class DiracDistribution(AbstractDistribution):
    def __init__(self, value):
        self.value = value

    def sample(self):
        return self.value

    def mode(self):
        return self.value


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)

    def sample(self):
        x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2)
                                       + self.var - 1.0 - self.logvar,
                                       dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var - 1.0 - self.logvar + other.logvar,
                    dim=[1, 2, 3])

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        return self.mean


def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.
    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    tensor = None
    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, torch.Tensor):
            tensor = obj
            break
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    logvar1, logvar2 = [
        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )

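A quick sketch of roughly how the KL autoencoder uses DiagonalGaussianDistribution: the encoder emits twice as many channels as the latent, which get chunked into mean and log-variance (shapes below are illustrative):

import torch

params = torch.randn(2, 8, 32, 32)       # pretend encoder output: 4 mean + 4 logvar channels
posterior = DiagonalGaussianDistribution(params)
z = posterior.sample()                    # reparameterized draw, shape (2, 4, 32, 32)
kl = posterior.kl()                       # KL to the standard normal prior, shape (2,)
assert z.shape == (2, 4, 32, 32) and kl.shape == (2,)
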
CCEdit-main/src/controlnet11/ldm/modules/ema.py
ADDED
@@ -0,0 +1,80 @@
import torch
from torch import nn


class LitEma(nn.Module):
    def __init__(self, model, decay=0.9999, use_num_upates=True):
        super().__init__()
        if decay < 0.0 or decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')

        self.m_name2s_name = {}
        self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
                             else torch.tensor(-1, dtype=torch.int))

        for name, p in model.named_parameters():
            if p.requires_grad:
                # remove as '.'-character is not allowed in buffers
                s_name = name.replace('.', '')
                self.m_name2s_name.update({name: s_name})
                self.register_buffer(s_name, p.clone().detach().data)

        self.collected_params = []

    def reset_num_updates(self):
        del self.num_updates
        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))

    def forward(self, model):
        decay = self.decay

        if self.num_updates >= 0:
            self.num_updates += 1
            decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))

        one_minus_decay = 1.0 - decay

        with torch.no_grad():
            m_param = dict(model.named_parameters())
            shadow_params = dict(self.named_buffers())

            for key in m_param:
                if m_param[key].requires_grad:
                    sname = self.m_name2s_name[key]
                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
                    shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
                else:
                    assert not key in self.m_name2s_name

    def copy_to(self, model):
        m_param = dict(model.named_parameters())
        shadow_params = dict(self.named_buffers())
        for key in m_param:
            if m_param[key].requires_grad:
                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
            else:
                assert not key in self.m_name2s_name

    def store(self, parameters):
        """
        Save the current parameters for restoring later.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                        temporarily stored.
        """
        self.collected_params = [param.clone() for param in parameters]

    def restore(self, parameters):
        """
        Restore the parameters stored with the `store` method.
        Useful to validate the model with EMA parameters without affecting the
        original optimization process. Store the parameters before the
        `copy_to` method. After validation (or model saving), use this to
        restore the former parameters.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                        updated with the stored parameters.
        """
        for c_param, param in zip(self.collected_params, parameters):
            param.data.copy_(c_param.data)

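A training-loop sketch of the intended store / copy_to / restore dance (the model, optimizer, and data below are placeholders):

import torch
import torch.nn as nn

model = nn.Linear(4, 4)
ema = LitEma(model)
opt = torch.optim.SGD(model.parameters(), lr=1e-2)

for _ in range(10):                       # toy training steps
    loss = model(torch.randn(8, 4)).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
    ema(model)                            # update shadow weights after each step

ema.store(model.parameters())             # stash the raw training weights
ema.copy_to(model)                        # evaluate or save with EMA weights
ema.restore(model.parameters())           # then resume training as before
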