Upload folder using huggingface_hub
Browse files- .gitignore +167 -0
- DWT_IDWT/DWT_IDWT_Functions.py +208 -0
- DWT_IDWT/DWT_IDWT_layer.py +666 -0
- DWT_IDWT/__init__.py +0 -0
- LICENSE +21 -0
- README.md +145 -0
- assets/wdm.png +3 -0
- environment.yml +22 -0
- eval/activations/activations.txt +1 -0
- eval/eval_environment.yml +19 -0
- eval/fid.py +214 -0
- eval/model.py +101 -0
- eval/models/resnet.py +245 -0
- eval/ms_ssim.py +70 -0
- eval/pretrained/pretrained.txt +3 -0
- guided_diffusion/__init__.py +3 -0
- guided_diffusion/bratsloader.py +85 -0
- guided_diffusion/dist_util.py +107 -0
- guided_diffusion/gaussian_diffusion.py +1222 -0
- guided_diffusion/inpaintloader.py +119 -0
- guided_diffusion/lidcloader.py +70 -0
- guided_diffusion/logger.py +495 -0
- guided_diffusion/losses.py +77 -0
- guided_diffusion/nn.py +170 -0
- guided_diffusion/pretrain_checks.py +56 -0
- guided_diffusion/resample.py +154 -0
- guided_diffusion/respace.py +135 -0
- guided_diffusion/script_util.py +574 -0
- guided_diffusion/train_util.py +484 -0
- guided_diffusion/unet.py +1044 -0
- guided_diffusion/wunet.py +795 -0
- run.sh +131 -0
- scripts/generation_sample.py +170 -0
- scripts/generation_train.py +184 -0
- utils/preproc_lidc-idri.py +92 -0
.gitignore
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# Defined folders
|
| 7 |
+
./data/
|
| 8 |
+
./results/
|
| 9 |
+
./runs/
|
| 10 |
+
|
| 11 |
+
*.npy
|
| 12 |
+
|
| 13 |
+
# C extensions
|
| 14 |
+
*.so
|
| 15 |
+
|
| 16 |
+
# Distribution / packaging
|
| 17 |
+
.Python
|
| 18 |
+
build/
|
| 19 |
+
develop-eggs/
|
| 20 |
+
dist/
|
| 21 |
+
downloads/
|
| 22 |
+
eggs/
|
| 23 |
+
.eggs/
|
| 24 |
+
lib/
|
| 25 |
+
lib64/
|
| 26 |
+
parts/
|
| 27 |
+
sdist/
|
| 28 |
+
var/
|
| 29 |
+
wheels/
|
| 30 |
+
share/python-wheels/
|
| 31 |
+
*.egg-info/
|
| 32 |
+
.installed.cfg
|
| 33 |
+
*.egg
|
| 34 |
+
MANIFEST
|
| 35 |
+
|
| 36 |
+
# PyInstaller
|
| 37 |
+
# Usually these files are written by a python script from a template
|
| 38 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 39 |
+
*.manifest
|
| 40 |
+
*.spec
|
| 41 |
+
|
| 42 |
+
# Installer logs
|
| 43 |
+
pip-log.txt
|
| 44 |
+
pip-delete-this-directory.txt
|
| 45 |
+
|
| 46 |
+
# Unit test / coverage reports
|
| 47 |
+
htmlcov/
|
| 48 |
+
.tox/
|
| 49 |
+
.nox/
|
| 50 |
+
.coverage
|
| 51 |
+
.coverage.*
|
| 52 |
+
.cache
|
| 53 |
+
nosetests.xml
|
| 54 |
+
coverage.xml
|
| 55 |
+
*.cover
|
| 56 |
+
*.py,cover
|
| 57 |
+
.hypothesis/
|
| 58 |
+
.pytest_cache/
|
| 59 |
+
cover/
|
| 60 |
+
|
| 61 |
+
# Translations
|
| 62 |
+
*.mo
|
| 63 |
+
*.pot
|
| 64 |
+
|
| 65 |
+
# Django stuff:
|
| 66 |
+
*.log
|
| 67 |
+
local_settings.py
|
| 68 |
+
db.sqlite3
|
| 69 |
+
db.sqlite3-journal
|
| 70 |
+
|
| 71 |
+
# Flask stuff:
|
| 72 |
+
instance/
|
| 73 |
+
.webassets-cache
|
| 74 |
+
|
| 75 |
+
# Scrapy stuff:
|
| 76 |
+
.scrapy
|
| 77 |
+
|
| 78 |
+
# Sphinx documentation
|
| 79 |
+
docs/_build/
|
| 80 |
+
|
| 81 |
+
# PyBuilder
|
| 82 |
+
.pybuilder/
|
| 83 |
+
target/
|
| 84 |
+
|
| 85 |
+
# Jupyter Notebook
|
| 86 |
+
.ipynb_checkpoints
|
| 87 |
+
|
| 88 |
+
# IPython
|
| 89 |
+
profile_default/
|
| 90 |
+
ipython_config.py
|
| 91 |
+
|
| 92 |
+
# pyenv
|
| 93 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 94 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 95 |
+
# .python-version
|
| 96 |
+
|
| 97 |
+
# pipenv
|
| 98 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 99 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 100 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 101 |
+
# install all needed dependencies.
|
| 102 |
+
#Pipfile.lock
|
| 103 |
+
|
| 104 |
+
# poetry
|
| 105 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 106 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 107 |
+
# commonly ignored for libraries.
|
| 108 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 109 |
+
#poetry.lock
|
| 110 |
+
|
| 111 |
+
# pdm
|
| 112 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 113 |
+
#pdm.lock
|
| 114 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 115 |
+
# in version control.
|
| 116 |
+
# https://pdm.fming.dev/#use-with-ide
|
| 117 |
+
.pdm.toml
|
| 118 |
+
|
| 119 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 120 |
+
__pypackages__/
|
| 121 |
+
|
| 122 |
+
# Celery stuff
|
| 123 |
+
celerybeat-schedule
|
| 124 |
+
celerybeat.pid
|
| 125 |
+
|
| 126 |
+
# SageMath parsed files
|
| 127 |
+
*.sage.py
|
| 128 |
+
|
| 129 |
+
# Environments
|
| 130 |
+
.env
|
| 131 |
+
.venv
|
| 132 |
+
env/
|
| 133 |
+
venv/
|
| 134 |
+
ENV/
|
| 135 |
+
env.bak/
|
| 136 |
+
venv.bak/
|
| 137 |
+
|
| 138 |
+
# Spyder project settings
|
| 139 |
+
.spyderproject
|
| 140 |
+
.spyproject
|
| 141 |
+
|
| 142 |
+
# Rope project settings
|
| 143 |
+
.ropeproject
|
| 144 |
+
|
| 145 |
+
# mkdocs documentation
|
| 146 |
+
/site
|
| 147 |
+
|
| 148 |
+
# mypy
|
| 149 |
+
.mypy_cache/
|
| 150 |
+
.dmypy.json
|
| 151 |
+
dmypy.json
|
| 152 |
+
|
| 153 |
+
# Pyre type checker
|
| 154 |
+
.pyre/
|
| 155 |
+
|
| 156 |
+
# pytype static type analyzer
|
| 157 |
+
.pytype/
|
| 158 |
+
|
| 159 |
+
# Cython debug symbols
|
| 160 |
+
cython_debug/
|
| 161 |
+
|
| 162 |
+
# PyCharm
|
| 163 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 164 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 165 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 166 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 167 |
+
.idea/
|
DWT_IDWT/DWT_IDWT_Functions.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2019, Adobe Inc. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike
|
| 4 |
+
# 4.0 International Public License. To view a copy of this license, visit
|
| 5 |
+
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
自定义pytorch函数,实现一维、二维、三维张量的DWT和IDWT,未考虑边界延拓
|
| 9 |
+
只有当图像行列数都是偶数,且重构滤波器组低频分量长度为2时,才能精确重构,否则在边界处有误差。
|
| 10 |
+
"""
|
| 11 |
+
import torch
|
| 12 |
+
from torch.autograd import Function
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class DWTFunction_1D(Function):
    """Autograd function for the 1D discrete wavelet transform (DWT).

    Decomposes a signal along its last dimension into low- and
    high-frequency components via matrix multiplication.
    """

    @staticmethod
    def forward(ctx, input, matrix_Low, matrix_High):
        """
        :param input: tensor of shape (..., Length)
        :param matrix_Low: low-pass analysis matrix, shape (Length/2, Length)
        :param matrix_High: high-pass analysis matrix, shape (Length/2, Length)
        :return: (L, H) -- low- and high-frequency components, (..., Length/2)
        """
        ctx.save_for_backward(matrix_Low, matrix_High)
        L = torch.matmul(input, matrix_Low.t())
        H = torch.matmul(input, matrix_High.t())
        return L, H

    @staticmethod
    def backward(ctx, grad_L, grad_H):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed from modern PyTorch.
        matrix_L, matrix_H = ctx.saved_tensors
        # Gradient of the linear forward: chain rule through both branches.
        grad_input = torch.add(torch.matmul(grad_L, matrix_L),
                               torch.matmul(grad_H, matrix_H))
        return grad_input, None, None
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class IDWTFunction_1D(Function):
    """Autograd function for the 1D inverse DWT (IDWT).

    Reconstructs a signal from its low- and high-frequency components
    via matrix multiplication.
    """

    @staticmethod
    def forward(ctx, input_L, input_H, matrix_L, matrix_H):
        """
        :param input_L: low-frequency component, shape (..., Length/2)
        :param input_H: high-frequency component, shape (..., Length/2)
        :param matrix_L: low-pass synthesis matrix, shape (Length/2, Length)
        :param matrix_H: high-pass synthesis matrix, shape (Length/2, Length)
        :return: reconstructed signal, shape (..., Length)
        """
        ctx.save_for_backward(matrix_L, matrix_H)
        output = torch.add(torch.matmul(input_L, matrix_L),
                           torch.matmul(input_H, matrix_H))
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed from modern PyTorch.
        matrix_L, matrix_H = ctx.saved_tensors
        grad_L = torch.matmul(grad_output, matrix_L.t())
        grad_H = torch.matmul(grad_output, matrix_H.t())
        return grad_L, grad_H, None, None
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class DWTFunction_2D(Function):
    """Autograd function for the 2D DWT.

    Splits a batch (N, C, H, W) into four wavelet sub-bands
    (LL, LH, HL, HH) via left/right matrix multiplications.
    """

    @staticmethod
    def forward(ctx, input, matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
        """
        :param input: tensor of shape (N, C, H, W)
        :param matrix_Low_0: low-pass matrix applied along H (left multiply)
        :param matrix_Low_1: low-pass matrix applied along W (right multiply)
        :param matrix_High_0: high-pass matrix applied along H
        :param matrix_High_1: high-pass matrix applied along W
        :return: sub-bands LL, LH, HL, HH, each roughly (N, C, H/2, W/2)
        """
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1,
                              matrix_High_0, matrix_High_1)
        L = torch.matmul(matrix_Low_0, input)
        H = torch.matmul(matrix_High_0, input)
        LL = torch.matmul(L, matrix_Low_1)
        LH = torch.matmul(L, matrix_High_1)
        HL = torch.matmul(H, matrix_Low_1)
        HH = torch.matmul(H, matrix_High_1)
        return LL, LH, HL, HH

    @staticmethod
    def backward(ctx, grad_LL, grad_LH, grad_HL, grad_HH):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed from modern PyTorch.
        matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_tensors
        # Undo the column (W) transform first, then the row (H) transform.
        grad_L = torch.add(torch.matmul(grad_LL, matrix_Low_1.t()),
                           torch.matmul(grad_LH, matrix_High_1.t()))
        grad_H = torch.add(torch.matmul(grad_HL, matrix_Low_1.t()),
                           torch.matmul(grad_HH, matrix_High_1.t()))
        grad_input = torch.add(torch.matmul(
            matrix_Low_0.t(), grad_L), torch.matmul(matrix_High_0.t(), grad_H))
        return grad_input, None, None, None, None
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class DWTFunction_2D_tiny(Function):
    """Autograd function for the 2D DWT that returns only the LL sub-band.

    Same decomposition as DWTFunction_2D, but only the low-frequency
    component is computed and returned (used by WaveCNet-style pooling).
    """

    @staticmethod
    def forward(ctx, input, matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
        """
        :param input: tensor of shape (N, C, H, W)
        :param matrix_Low_0: low-pass matrix applied along H (left multiply)
        :param matrix_Low_1: low-pass matrix applied along W (right multiply)
        :param matrix_High_0: unused here; kept for a uniform interface
        :param matrix_High_1: unused here; kept for a uniform interface
        :return: LL sub-band, roughly (N, C, H/2, W/2)
        """
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1,
                              matrix_High_0, matrix_High_1)
        L = torch.matmul(matrix_Low_0, input)
        LL = torch.matmul(L, matrix_Low_1)
        return LL

    @staticmethod
    def backward(ctx, grad_LL):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed from modern PyTorch.
        matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_tensors
        grad_L = torch.matmul(grad_LL, matrix_Low_1.t())
        grad_input = torch.matmul(matrix_Low_0.t(), grad_L)
        return grad_input, None, None, None, None
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class IDWTFunction_2D(Function):
    """Autograd function for the 2D inverse DWT.

    Reconstructs a batch (N, C, H, W) from its four wavelet sub-bands.
    """

    @staticmethod
    def forward(ctx, input_LL, input_LH, input_HL, input_HH,
                matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
        """
        :param input_LL: low/low sub-band, roughly (N, C, H/2, W/2)
        :param input_LH: low/high sub-band
        :param input_HL: high/low sub-band
        :param input_HH: high/high sub-band
        :param matrix_Low_0: low-pass matrix for the H dimension
        :param matrix_Low_1: low-pass matrix for the W dimension
        :param matrix_High_0: high-pass matrix for the H dimension
        :param matrix_High_1: high-pass matrix for the W dimension
        :return: reconstructed tensor of shape (N, C, H, W)
        """
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1,
                              matrix_High_0, matrix_High_1)
        # Invert the column (W) transform first, then the row (H) transform.
        L = torch.add(torch.matmul(input_LL, matrix_Low_1.t()),
                      torch.matmul(input_LH, matrix_High_1.t()))
        H = torch.add(torch.matmul(input_HL, matrix_Low_1.t()),
                      torch.matmul(input_HH, matrix_High_1.t()))
        output = torch.add(torch.matmul(matrix_Low_0.t(), L),
                           torch.matmul(matrix_High_0.t(), H))
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed from modern PyTorch.
        matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_tensors
        grad_L = torch.matmul(matrix_Low_0, grad_output)
        grad_H = torch.matmul(matrix_High_0, grad_output)
        grad_LL = torch.matmul(grad_L, matrix_Low_1)
        grad_LH = torch.matmul(grad_L, matrix_High_1)
        grad_HL = torch.matmul(grad_H, matrix_Low_1)
        grad_HH = torch.matmul(grad_H, matrix_High_1)
        return grad_LL, grad_LH, grad_HL, grad_HH, None, None, None, None
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class DWTFunction_3D(Function):
    """Autograd function for the 3D DWT.

    Decomposes a volume batch into eight wavelet sub-bands. The third
    spatial dimension is handled by transposing dims 2 and 3 so that
    matmul (which acts on the last two dims) can reach it.
    """

    @staticmethod
    def forward(ctx, input,
                matrix_Low_0, matrix_Low_1, matrix_Low_2,
                matrix_High_0, matrix_High_1, matrix_High_2):
        """
        :param input: 5D tensor (N, C, D, H, W)
        :param matrix_Low_0: low-pass matrix for the first transformed dim
        :param matrix_Low_1: low-pass matrix for the second transformed dim
        :param matrix_Low_2: low-pass matrix for the third transformed dim
        :param matrix_High_0: high-pass counterpart of matrix_Low_0
        :param matrix_High_1: high-pass counterpart of matrix_Low_1
        :param matrix_High_2: high-pass counterpart of matrix_Low_2
        :return: the eight sub-bands LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH
        """
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_Low_2,
                              matrix_High_0, matrix_High_1, matrix_High_2)
        L = torch.matmul(matrix_Low_0, input)
        H = torch.matmul(matrix_High_0, input)
        # Transpose dims 2/3 so the remaining axis is exposed to matmul.
        LL = torch.matmul(L, matrix_Low_1).transpose(dim0=2, dim1=3)
        LH = torch.matmul(L, matrix_High_1).transpose(dim0=2, dim1=3)
        HL = torch.matmul(H, matrix_Low_1).transpose(dim0=2, dim1=3)
        HH = torch.matmul(H, matrix_High_1).transpose(dim0=2, dim1=3)
        LLL = torch.matmul(matrix_Low_2, LL).transpose(dim0=2, dim1=3)
        LLH = torch.matmul(matrix_Low_2, LH).transpose(dim0=2, dim1=3)
        LHL = torch.matmul(matrix_Low_2, HL).transpose(dim0=2, dim1=3)
        LHH = torch.matmul(matrix_Low_2, HH).transpose(dim0=2, dim1=3)
        HLL = torch.matmul(matrix_High_2, LL).transpose(dim0=2, dim1=3)
        HLH = torch.matmul(matrix_High_2, LH).transpose(dim0=2, dim1=3)
        HHL = torch.matmul(matrix_High_2, HL).transpose(dim0=2, dim1=3)
        HHH = torch.matmul(matrix_High_2, HH).transpose(dim0=2, dim1=3)
        return LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH

    @staticmethod
    def backward(ctx, grad_LLL, grad_LLH, grad_LHL, grad_LHH,
                 grad_HLL, grad_HLH, grad_HHL, grad_HHH):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed from modern PyTorch.
        matrix_Low_0, matrix_Low_1, matrix_Low_2, matrix_High_0, matrix_High_1, matrix_High_2 = ctx.saved_tensors
        # Undo the third-dim transform (note the matching transposes).
        grad_LL = torch.add(torch.matmul(matrix_Low_2.t(), grad_LLL.transpose(dim0=2, dim1=3)), torch.matmul(
            matrix_High_2.t(), grad_HLL.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
        grad_LH = torch.add(torch.matmul(matrix_Low_2.t(), grad_LLH.transpose(dim0=2, dim1=3)), torch.matmul(
            matrix_High_2.t(), grad_HLH.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
        grad_HL = torch.add(torch.matmul(matrix_Low_2.t(), grad_LHL.transpose(dim0=2, dim1=3)), torch.matmul(
            matrix_High_2.t(), grad_HHL.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
        grad_HH = torch.add(torch.matmul(matrix_Low_2.t(), grad_LHH.transpose(dim0=2, dim1=3)), torch.matmul(
            matrix_High_2.t(), grad_HHH.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
        # Then the second and first dims, mirroring the 2D case.
        grad_L = torch.add(torch.matmul(grad_LL, matrix_Low_1.t()),
                           torch.matmul(grad_LH, matrix_High_1.t()))
        grad_H = torch.add(torch.matmul(grad_HL, matrix_Low_1.t()),
                           torch.matmul(grad_HH, matrix_High_1.t()))
        grad_input = torch.add(torch.matmul(
            matrix_Low_0.t(), grad_L), torch.matmul(matrix_High_0.t(), grad_H))
        return grad_input, None, None, None, None, None, None, None
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class IDWTFunction_3D(Function):
    """Autograd function for the 3D inverse DWT.

    Reconstructs a volume batch from its eight wavelet sub-bands,
    inverting the per-dimension transforms of DWTFunction_3D in
    reverse order.
    """

    @staticmethod
    def forward(ctx, input_LLL, input_LLH, input_LHL, input_LHH,
                input_HLL, input_HLH, input_HHL, input_HHH,
                matrix_Low_0, matrix_Low_1, matrix_Low_2,
                matrix_High_0, matrix_High_1, matrix_High_2):
        """
        :param input_LLL ... input_HHH: the eight sub-band tensors
        :param matrix_Low_0..2: low-pass matrices for the three dims
        :param matrix_High_0..2: high-pass matrices for the three dims
        :return: reconstructed 5D tensor (N, C, D, H, W)
        """
        ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_Low_2,
                              matrix_High_0, matrix_High_1, matrix_High_2)
        # Invert the third-dim transform first (note the transposes).
        input_LL = torch.add(torch.matmul(matrix_Low_2.t(), input_LLL.transpose(dim0=2, dim1=3)), torch.matmul(
            matrix_High_2.t(), input_HLL.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
        input_LH = torch.add(torch.matmul(matrix_Low_2.t(), input_LLH.transpose(dim0=2, dim1=3)), torch.matmul(
            matrix_High_2.t(), input_HLH.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
        input_HL = torch.add(torch.matmul(matrix_Low_2.t(), input_LHL.transpose(dim0=2, dim1=3)), torch.matmul(
            matrix_High_2.t(), input_HHL.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
        input_HH = torch.add(torch.matmul(matrix_Low_2.t(), input_LHH.transpose(dim0=2, dim1=3)), torch.matmul(
            matrix_High_2.t(), input_HHH.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
        # Then the second and first dims, mirroring IDWTFunction_2D.
        input_L = torch.add(torch.matmul(input_LL, matrix_Low_1.t()),
                            torch.matmul(input_LH, matrix_High_1.t()))
        input_H = torch.add(torch.matmul(input_HL, matrix_Low_1.t()),
                            torch.matmul(input_HH, matrix_High_1.t()))
        output = torch.add(torch.matmul(matrix_Low_0.t(), input_L),
                           torch.matmul(matrix_High_0.t(), input_H))
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables,
        # which has been removed from modern PyTorch.
        matrix_Low_0, matrix_Low_1, matrix_Low_2, matrix_High_0, matrix_High_1, matrix_High_2 = ctx.saved_tensors
        grad_L = torch.matmul(matrix_Low_0, grad_output)
        grad_H = torch.matmul(matrix_High_0, grad_output)
        grad_LL = torch.matmul(grad_L, matrix_Low_1).transpose(dim0=2, dim1=3)
        grad_LH = torch.matmul(grad_L, matrix_High_1).transpose(dim0=2, dim1=3)
        grad_HL = torch.matmul(grad_H, matrix_Low_1).transpose(dim0=2, dim1=3)
        grad_HH = torch.matmul(grad_H, matrix_High_1).transpose(dim0=2, dim1=3)
        grad_LLL = torch.matmul(
            matrix_Low_2, grad_LL).transpose(dim0=2, dim1=3)
        grad_LLH = torch.matmul(
            matrix_Low_2, grad_LH).transpose(dim0=2, dim1=3)
        grad_LHL = torch.matmul(
            matrix_Low_2, grad_HL).transpose(dim0=2, dim1=3)
        grad_LHH = torch.matmul(
            matrix_Low_2, grad_HH).transpose(dim0=2, dim1=3)
        grad_HLL = torch.matmul(
            matrix_High_2, grad_LL).transpose(dim0=2, dim1=3)
        grad_HLH = torch.matmul(
            matrix_High_2, grad_LH).transpose(dim0=2, dim1=3)
        grad_HHL = torch.matmul(
            matrix_High_2, grad_HL).transpose(dim0=2, dim1=3)
        grad_HHH = torch.matmul(
            matrix_High_2, grad_HH).transpose(dim0=2, dim1=3)
        return grad_LLL, grad_LLH, grad_LHL, grad_LHH, grad_HLL, grad_HLH, grad_HHL, grad_HHH, None, None, None, None, None, None
|
DWT_IDWT/DWT_IDWT_layer.py
ADDED
|
@@ -0,0 +1,666 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
自定义 pytorch 层,实现一维、二维、三维张量的 DWT 和 IDWT,未考虑边界延拓
|
| 3 |
+
只有当图像行列数都是偶数,且重构滤波器组低频分量长度为 2 时,才能精确重构,否则在边界处有误差。
|
| 4 |
+
"""
|
| 5 |
+
import math
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pywt
|
| 9 |
+
import torch
|
| 10 |
+
from torch.nn import Module
|
| 11 |
+
|
| 12 |
+
from .DWT_IDWT_Functions import DWTFunction_1D, IDWTFunction_1D, \
|
| 13 |
+
DWTFunction_2D_tiny, DWTFunction_2D, IDWTFunction_2D, \
|
| 14 |
+
DWTFunction_3D, IDWTFunction_3D
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
__all__ = ['DWT_1D', 'IDWT_1D', 'DWT_2D',
|
| 18 |
+
'IDWT_2D', 'DWT_3D', 'IDWT_3D', 'DWT_2D_tiny']
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class DWT_1D(Module):
    """
    1D discrete wavelet transform (DWT) layer for sequence decomposition.

    Boundary extension is not handled; reconstruction is exact only when the
    length is even and the low-pass reconstruction filter has length 2
    (otherwise there is error near the boundary).

    input: the 1D data to be decomposed -- (N, C, Length)
    output: lfc -- (N, C, Length/2)
            hfc -- (N, C, Length/2)
    """

    def __init__(self, wavename):
        """
        1D discrete wavelet transform (DWT) for sequence decomposition.

        :param wavename: a wavelet name accepted by pywt (see pywt.wavelist());
            in the paper, 'chx.y' denotes 'biorx.y'.
        """
        super(DWT_1D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # NOTE(review): the *reconstruction* filters (rec_lo/rec_hi) are used
        # to build the analysis matrices here -- for orthogonal wavelets they
        # coincide with the decomposition filters up to reversal; confirm the
        # intent for biorthogonal ones.
        self.band_low = wavelet.rec_lo
        self.band_high = wavelet.rec_hi
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        # Even filter length is required by the banded-matrix construction.
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)

    def get_matrix(self):
        """
        Generate the transform matrices \mathcal{L} and \mathcal{H}.

        Sets self.matrix_low = \mathcal{L} and self.matrix_high = \mathcal{H}
        for the current self.input_height.
        """
        L1 = self.input_height
        L = math.floor(L1 / 2)
        # Over-wide banded matrices; the filter overhang is cropped below.
        matrix_h = np.zeros((L, L1 + self.band_length - 2))
        matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
        # Crop bound: None keeps the full tail when the half-length is 1.
        end = None if self.band_length_half == 1 else (
            - self.band_length_half + 1)
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index + j] = self.band_low[j]
            # Shifting the filter by 2 per row implements dyadic downsampling.
            index += 2
        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index + j] = self.band_high[j]
            index += 2
        matrix_h = matrix_h[:, (self.band_length_half - 1):end]
        matrix_g = matrix_g[:, (self.band_length_half - 1):end]
        # NOTE(review): matrices go to CUDA whenever it is available,
        # regardless of the input's device -- presumably all inputs are on the
        # default GPU; verify for CPU inputs on a CUDA machine.
        if torch.cuda.is_available():
            self.matrix_low = torch.Tensor(matrix_h).cuda()
            self.matrix_high = torch.Tensor(matrix_g).cuda()
        else:
            self.matrix_low = torch.Tensor(matrix_h)
            self.matrix_high = torch.Tensor(matrix_g)

    def forward(self, input):
        """
        input_low_frequency_component = \mathcal{L} * input
        input_high_frequency_component = \mathcal{H} * input

        :param input: the data to be decomposed, shape (N, C, Length)
        :return: the low-frequency and high-frequency components of the input data
        """
        assert len(input.size()) == 3
        # Matrices are rebuilt on every call, so variable lengths are
        # supported (at the cost of redundant work for fixed-size inputs).
        self.input_height = input.size()[-1]
        self.get_matrix()
        return DWTFunction_1D.apply(input, self.matrix_low, self.matrix_high)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class IDWT_1D(Module):
    """
    1D inverse discrete wavelet transform (IDWT) layer for sequence
    reconstruction.

    input: lfc -- (N, C, Length/2)
           hfc -- (N, C, Length/2)
    output: the original data -- (N, C, Length)
    """

    def __init__(self, wavename):
        """
        1D inverse DWT (IDWT) for sequence reconstruction.

        :param wavename: a wavelet name accepted by pywt (see pywt.wavelist());
            in the paper, 'chx.y' denotes 'biorx.y'.
        """
        super(IDWT_1D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # NOTE(review): the *decomposition* filters (dec_lo/dec_hi) are used
        # for reconstruction here, reversed in place -- the mirror of DWT_1D's
        # choice; also note reverse() mutates the lists returned by pywt.
        self.band_low = wavelet.dec_lo
        self.band_high = wavelet.dec_hi
        self.band_low.reverse()
        self.band_high.reverse()
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        # Even filter length is required by the banded-matrix construction.
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)

    def get_matrix(self):
        """
        Generate the transform matrices \mathcal{L} and \mathcal{H}.

        Sets self.matrix_low = \mathcal{L} and self.matrix_high = \mathcal{H}
        for the current self.input_height.
        """
        L1 = self.input_height
        L = math.floor(L1 / 2)
        # Over-wide banded matrices; the filter overhang is cropped below.
        matrix_h = np.zeros((L, L1 + self.band_length - 2))
        matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
        # Crop bound: None keeps the full tail when the half-length is 1.
        end = None if self.band_length_half == 1 else (
            - self.band_length_half + 1)
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index + j] = self.band_low[j]
            # Shifting the filter by 2 per row implements dyadic upsampling
            # when the transposed matrix is applied in IDWTFunction_1D.
            index += 2
        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index + j] = self.band_high[j]
            index += 2
        matrix_h = matrix_h[:, (self.band_length_half - 1):end]
        matrix_g = matrix_g[:, (self.band_length_half - 1):end]
        # NOTE(review): matrices go to CUDA whenever it is available,
        # regardless of the input's device -- verify for CPU inputs on a
        # CUDA machine.
        if torch.cuda.is_available():
            self.matrix_low = torch.Tensor(matrix_h).cuda()
            self.matrix_high = torch.Tensor(matrix_g).cuda()
        else:
            self.matrix_low = torch.Tensor(matrix_h)
            self.matrix_high = torch.Tensor(matrix_g)

    def forward(self, L, H):
        """
        :param L: the low-frequency component of the original data
        :param H: the high-frequency component of the original data
        :return: the original data, shape (N, C, Length)
        """
        assert len(L.size()) == len(H.size()) == 3
        # Total output length is the sum of the two component lengths;
        # matrices are rebuilt on every call to support variable lengths.
        self.input_height = L.size()[-1] + H.size()[-1]
        self.get_matrix()
        return IDWTFunction_1D.apply(L, H, self.matrix_low, self.matrix_high)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class DWT_2D_tiny(Module):
    """
    2D discrete wavelet transform that returns only the low-frequency subband.

    input:  the 2D data to be decomposed -- (N, C, H, W)
    output -- lfc: (N, C, H/2, W/2)

    DWT_2D_tiny only outputs the low-frequency component (as used in WaveCNet);
    use DWT_2D when all four subbands are needed (as in WaveUNet).
    """

    def __init__(self, wavename):
        """
        2D discrete wavelet transform (DWT) for 2D image decomposition.

        :param wavename: any name from pywt.wavelist(); in the paper,
                         'chx.y' denotes 'biorx.y'.
        """
        super(DWT_2D_tiny, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        self.band_low = wavelet.rec_lo
        self.band_high = wavelet.rec_hi
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)

    def get_matrix(self):
        """
        Generate the analysis matrices \mathcal{L} and \mathcal{H} for the
        current input size.

        :return: sets self.matrix_low_* = \mathcal{L}, self.matrix_high_* = \mathcal{H}
        """
        max_side = np.max((self.input_height, self.input_width))
        half_side = math.floor(max_side / 2)
        n_cols = max_side + self.band_length - 2
        low_bank = np.zeros((half_side, n_cols))
        high_bank = np.zeros((max_side - half_side, n_cols))
        # With a length-2 filter the trailing trim is empty, hence `None`.
        end = None if self.band_length_half == 1 else (1 - self.band_length_half)

        # Each row holds the filter shifted by two columns (stride-2 convolution).
        for row in range(half_side):
            low_bank[row, 2 * row:2 * row + self.band_length] = self.band_low
        matrix_h_0 = low_bank[0:math.floor(self.input_height / 2),
                              0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = low_bank[0:math.floor(self.input_width / 2),
                              0:(self.input_width + self.band_length - 2)]

        for row in range(max_side - half_side):
            high_bank[row, 2 * row:2 * row + self.band_length] = self.band_high
        matrix_g_0 = high_bank[0:(self.input_height - math.floor(self.input_height / 2)),
                               0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = high_bank[0:(self.input_width - math.floor(self.input_width / 2)),
                               0:(self.input_width + self.band_length - 2)]

        # Trim the boundary columns; the *_1 matrices act on the width axis
        # from the right, hence the transpose.
        first = self.band_length_half - 1
        matrix_h_0 = matrix_h_0[:, first:end]
        matrix_h_1 = np.transpose(matrix_h_1[:, first:end])
        matrix_g_0 = matrix_g_0[:, first:end]
        matrix_g_1 = np.transpose(matrix_g_1[:, first:end])

        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)

    def forward(self, input):
        """
        input_lfc = \mathcal{L} * input * \mathcal{L}^T

        :param input: the 2D data to be decomposed -- (N, C, H, W)
        :return: the low-frequency component of the input 2D data
        """
        assert len(input.size()) == 4
        self.input_height = input.size()[-2]
        self.input_width = input.size()[-1]
        self.get_matrix()
        return DWTFunction_2D_tiny.apply(input, self.matrix_low_0, self.matrix_low_1,
                                         self.matrix_high_0, self.matrix_high_1)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
class DWT_2D(Module):
    """
    2D discrete wavelet transform returning all four subbands.

    input:  the 2D data to be decomposed -- (N, C, H, W)
    output -- lfc:    (N, C, H/2, W/2)
              hfc_lh: (N, C, H/2, W/2)
              hfc_hl: (N, C, H/2, W/2)
              hfc_hh: (N, C, H/2, W/2)
    """

    def __init__(self, wavename):
        """
        2D discrete wavelet transform (DWT) for 2D image decomposition.

        :param wavename: any name from pywt.wavelist(); in the paper,
                         'chx.y' denotes 'biorx.y'.
        """
        super(DWT_2D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        self.band_low = wavelet.rec_lo
        self.band_high = wavelet.rec_hi
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)

    def get_matrix(self):
        """
        Generate the analysis matrices \mathcal{L} and \mathcal{H} for the
        current input size.

        :return: sets self.matrix_low_* = \mathcal{L}, self.matrix_high_* = \mathcal{H}
        """
        max_side = np.max((self.input_height, self.input_width))
        half_side = math.floor(max_side / 2)
        n_cols = max_side + self.band_length - 2
        low_bank = np.zeros((half_side, n_cols))
        high_bank = np.zeros((max_side - half_side, n_cols))
        # With a length-2 filter the trailing trim is empty, hence `None`.
        end = None if self.band_length_half == 1 else (1 - self.band_length_half)

        # Each row holds the filter shifted by two columns (stride-2 convolution).
        for row in range(half_side):
            low_bank[row, 2 * row:2 * row + self.band_length] = self.band_low
        matrix_h_0 = low_bank[0:math.floor(self.input_height / 2),
                              0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = low_bank[0:math.floor(self.input_width / 2),
                              0:(self.input_width + self.band_length - 2)]

        for row in range(max_side - half_side):
            high_bank[row, 2 * row:2 * row + self.band_length] = self.band_high
        matrix_g_0 = high_bank[0:(self.input_height - math.floor(self.input_height / 2)),
                               0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = high_bank[0:(self.input_width - math.floor(self.input_width / 2)),
                               0:(self.input_width + self.band_length - 2)]

        # Trim the boundary columns; the *_1 matrices act on the width axis
        # from the right, hence the transpose.
        first = self.band_length_half - 1
        matrix_h_0 = matrix_h_0[:, first:end]
        matrix_h_1 = np.transpose(matrix_h_1[:, first:end])
        matrix_g_0 = matrix_g_0[:, first:end]
        matrix_g_1 = np.transpose(matrix_g_1[:, first:end])

        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)

    def forward(self, input):
        """
        input_lfc    = \mathcal{L} * input * \mathcal{L}^T
        input_hfc_lh = \mathcal{H} * input * \mathcal{L}^T
        input_hfc_hl = \mathcal{L} * input * \mathcal{H}^T
        input_hfc_hh = \mathcal{H} * input * \mathcal{H}^T

        :param input: the 2D data to be decomposed
        :return: the low-frequency and high-frequency components of the input
        """
        assert len(input.size()) == 4
        self.input_height = input.size()[-2]
        self.input_width = input.size()[-1]
        self.get_matrix()
        return DWTFunction_2D.apply(input, self.matrix_low_0, self.matrix_low_1,
                                    self.matrix_high_0, self.matrix_high_1)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
class IDWT_2D(Module):
    """
    2D inverse discrete wavelet transform.

    input: lfc    -- (N, C, H/2, W/2)
           hfc_lh -- (N, C, H/2, W/2)
           hfc_hl -- (N, C, H/2, W/2)
           hfc_hh -- (N, C, H/2, W/2)
    output: the original 2D data -- (N, C, H, W)
    """

    def __init__(self, wavename):
        """
        2D inverse DWT (IDWT) for 2D image reconstruction.

        :param wavename: any name from pywt.wavelist(); in the paper,
                         'chx.y' denotes 'biorx.y'.
        """
        super(IDWT_2D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # Synthesis uses the time-reversed decomposition filters.
        self.band_low = wavelet.dec_lo
        self.band_low.reverse()
        self.band_high = wavelet.dec_hi
        self.band_high.reverse()
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)

    def get_matrix(self):
        """
        Generate the synthesis matrices \mathcal{L} and \mathcal{H} for the
        current input size.

        :return: sets self.matrix_low_* = \mathcal{L}, self.matrix_high_* = \mathcal{H}
        """
        max_side = np.max((self.input_height, self.input_width))
        half_side = math.floor(max_side / 2)
        n_cols = max_side + self.band_length - 2
        low_bank = np.zeros((half_side, n_cols))
        high_bank = np.zeros((max_side - half_side, n_cols))
        # With a length-2 filter the trailing trim is empty, hence `None`.
        end = None if self.band_length_half == 1 else (1 - self.band_length_half)

        # Each row holds the filter shifted by two columns (stride-2 convolution).
        for row in range(half_side):
            low_bank[row, 2 * row:2 * row + self.band_length] = self.band_low
        matrix_h_0 = low_bank[0:math.floor(self.input_height / 2),
                              0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = low_bank[0:math.floor(self.input_width / 2),
                              0:(self.input_width + self.band_length - 2)]

        for row in range(max_side - half_side):
            high_bank[row, 2 * row:2 * row + self.band_length] = self.band_high
        matrix_g_0 = high_bank[0:(self.input_height - math.floor(self.input_height / 2)),
                               0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = high_bank[0:(self.input_width - math.floor(self.input_width / 2)),
                               0:(self.input_width + self.band_length - 2)]

        # Trim the boundary columns; the *_1 matrices act on the width axis
        # from the right, hence the transpose.
        first = self.band_length_half - 1
        matrix_h_0 = matrix_h_0[:, first:end]
        matrix_h_1 = np.transpose(matrix_h_1[:, first:end])
        matrix_g_0 = matrix_g_0[:, first:end]
        matrix_g_1 = np.transpose(matrix_g_1[:, first:end])

        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)

    def forward(self, LL, LH, HL, HH):
        """
        Reconstruct the original 2D data:
            original = \mathcal{L}^T * lfc    * \mathcal{L}
                     + \mathcal{H}^T * hfc_lh * \mathcal{L}
                     + \mathcal{L}^T * hfc_hl * \mathcal{H}
                     + \mathcal{H}^T * hfc_hh * \mathcal{H}

        :param LL: the low-frequency component
        :param LH: the high-frequency component, hfc_lh
        :param HL: the high-frequency component, hfc_hl
        :param HH: the high-frequency component, hfc_hh
        :return: the original 2D data
        """
        assert len(LL.size()) == len(LH.size()) == len(
            HL.size()) == len(HH.size()) == 4
        self.input_height = LL.size()[-2] + HH.size()[-2]
        self.input_width = LL.size()[-1] + HH.size()[-1]
        self.get_matrix()
        return IDWTFunction_2D.apply(LL, LH, HL, HH,
                                     self.matrix_low_0, self.matrix_low_1,
                                     self.matrix_high_0, self.matrix_high_1)
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
class DWT_3D(Module):
    """
    3D discrete wavelet transform (DWT) for volumetric data.

    input:  the 3D data to be decomposed -- (N, C, D, H, W)
    output: lfc     -- (N, C, D/2, H/2, W/2)
            hfc_llh -- (N, C, D/2, H/2, W/2)
            hfc_lhl -- (N, C, D/2, H/2, W/2)
            hfc_lhh -- (N, C, D/2, H/2, W/2)
            hfc_hll -- (N, C, D/2, H/2, W/2)
            hfc_hlh -- (N, C, D/2, H/2, W/2)
            hfc_hhl -- (N, C, D/2, H/2, W/2)
            hfc_hhh -- (N, C, D/2, H/2, W/2)
    """

    def __init__(self, wavename):
        """
        3D discrete wavelet transform (DWT) for 3D data decomposition.

        :param wavename: any name from pywt.wavelist(); in the paper,
                         'chx.y' denotes 'biorx.y'.
        """
        super(DWT_3D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        self.band_low = wavelet.rec_lo
        self.band_high = wavelet.rec_hi
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)

    def get_matrix(self):
        """
        Generate the transform matrices \mathcal{L} and \mathcal{H} for all
        three axes (height, width, depth).

        Fix over the original implementation: the template banks are sized by
        the largest of (depth, height, width). The original used only
        max(height, width), so whenever the depth exceeded both spatial
        dimensions the depth-axis slices silently truncated and produced
        wrong-sized matrices. For depth <= max(height, width) the output is
        identical to before.

        :return: sets self.matrix_low_{0,1,2} = \mathcal{L},
                 self.matrix_high_{0,1,2} = \mathcal{H}
        """
        L1 = np.max((self.input_depth, self.input_height, self.input_width))
        L = math.floor(L1 / 2)
        matrix_h = np.zeros((L, L1 + self.band_length - 2))
        matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
        # With a length-2 filter the trailing trim is empty, hence `None`.
        end = None if self.band_length_half == 1 else (
            - self.band_length_half + 1)

        # Each row holds the filter shifted by two columns (stride-2 convolution).
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index + j] = self.band_low[j]
            index += 2
        matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)),
                              0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)),
                              0:(self.input_width + self.band_length - 2)]
        matrix_h_2 = matrix_h[0:(math.floor(self.input_depth / 2)),
                              0:(self.input_depth + self.band_length - 2)]

        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index + j] = self.band_high[j]
            index += 2
        matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),
                              0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),
                              0:(self.input_width + self.band_length - 2)]
        matrix_g_2 = matrix_g[0:(self.input_depth - math.floor(self.input_depth / 2)),
                              0:(self.input_depth + self.band_length - 2)]

        # Trim boundary columns; the *_1 matrices act from the right on the
        # width axis, hence the transpose.
        matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
        matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
        matrix_h_1 = np.transpose(matrix_h_1)
        matrix_h_2 = matrix_h_2[:, (self.band_length_half - 1):end]

        matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
        matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
        matrix_g_1 = np.transpose(matrix_g_1)
        matrix_g_2 = matrix_g_2[:, (self.band_length_half - 1):end]
        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_low_2 = torch.Tensor(matrix_h_2).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
            self.matrix_high_2 = torch.Tensor(matrix_g_2).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_low_2 = torch.Tensor(matrix_h_2)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)
            self.matrix_high_2 = torch.Tensor(matrix_g_2)

    def forward(self, input):
        """
        :param input: the 3D data to be decomposed -- (N, C, D, H, W)
        :return: the eight components of the input data, one low-frequency
                 and seven high-frequency components
        """
        assert len(input.size()) == 5
        self.input_depth = input.size()[-3]
        self.input_height = input.size()[-2]
        self.input_width = input.size()[-1]
        self.get_matrix()
        return DWTFunction_3D.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_low_2,
                                    self.matrix_high_0, self.matrix_high_1, self.matrix_high_2)
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
class IDWT_3D(Module):
    """
    3D inverse discrete wavelet transform (IDWT).

    input: lfc     -- (N, C, D/2, H/2, W/2)
           hfc_llh -- (N, C, D/2, H/2, W/2)
           hfc_lhl -- (N, C, D/2, H/2, W/2)
           hfc_lhh -- (N, C, D/2, H/2, W/2)
           hfc_hll -- (N, C, D/2, H/2, W/2)
           hfc_hlh -- (N, C, D/2, H/2, W/2)
           hfc_hhl -- (N, C, D/2, H/2, W/2)
           hfc_hhh -- (N, C, D/2, H/2, W/2)
    output: the original 3D data -- (N, C, D, H, W)
    """

    def __init__(self, wavename):
        """
        3D inverse DWT (IDWT) for 3D data reconstruction.

        :param wavename: any name from pywt.wavelist(); in the paper,
                         'chx.y' denotes 'biorx.y'.
        """
        super(IDWT_3D, self).__init__()
        wavelet = pywt.Wavelet(wavename)
        # Synthesis uses the time-reversed decomposition filters.
        self.band_low = wavelet.dec_lo
        self.band_high = wavelet.dec_hi
        self.band_low.reverse()
        self.band_high.reverse()
        assert len(self.band_low) == len(self.band_high)
        self.band_length = len(self.band_low)
        assert self.band_length % 2 == 0
        self.band_length_half = math.floor(self.band_length / 2)

    def get_matrix(self):
        """
        Generate the synthesis matrices \mathcal{L} and \mathcal{H} for all
        three axes (height, width, depth).

        Fix over the original implementation: the template banks are sized by
        the largest of (depth, height, width). The original used only
        max(height, width), so whenever the depth exceeded both spatial
        dimensions the depth-axis slices silently truncated and produced
        wrong-sized matrices. For depth <= max(height, width) the output is
        identical to before.

        :return: sets self.matrix_low_{0,1,2} = \mathcal{L},
                 self.matrix_high_{0,1,2} = \mathcal{H}
        """
        L1 = np.max((self.input_depth, self.input_height, self.input_width))
        L = math.floor(L1 / 2)
        matrix_h = np.zeros((L, L1 + self.band_length - 2))
        matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
        # With a length-2 filter the trailing trim is empty, hence `None`.
        end = None if self.band_length_half == 1 else (
            - self.band_length_half + 1)

        # Each row holds the filter shifted by two columns (stride-2 convolution).
        index = 0
        for i in range(L):
            for j in range(self.band_length):
                matrix_h[i, index + j] = self.band_low[j]
            index += 2
        matrix_h_0 = matrix_h[0:(math.floor(self.input_height / 2)),
                              0:(self.input_height + self.band_length - 2)]
        matrix_h_1 = matrix_h[0:(math.floor(self.input_width / 2)),
                              0:(self.input_width + self.band_length - 2)]
        matrix_h_2 = matrix_h[0:(math.floor(self.input_depth / 2)),
                              0:(self.input_depth + self.band_length - 2)]

        index = 0
        for i in range(L1 - L):
            for j in range(self.band_length):
                matrix_g[i, index + j] = self.band_high[j]
            index += 2
        matrix_g_0 = matrix_g[0:(self.input_height - math.floor(self.input_height / 2)),
                              0:(self.input_height + self.band_length - 2)]
        matrix_g_1 = matrix_g[0:(self.input_width - math.floor(self.input_width / 2)),
                              0:(self.input_width + self.band_length - 2)]
        matrix_g_2 = matrix_g[0:(self.input_depth - math.floor(self.input_depth / 2)),
                              0:(self.input_depth + self.band_length - 2)]

        # Trim boundary columns; the *_1 matrices act from the right on the
        # width axis, hence the transpose.
        matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
        matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
        matrix_h_1 = np.transpose(matrix_h_1)
        matrix_h_2 = matrix_h_2[:, (self.band_length_half - 1):end]

        matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
        matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
        matrix_g_1 = np.transpose(matrix_g_1)
        matrix_g_2 = matrix_g_2[:, (self.band_length_half - 1):end]
        if torch.cuda.is_available():
            self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
            self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
            self.matrix_low_2 = torch.Tensor(matrix_h_2).cuda()
            self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
            self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
            self.matrix_high_2 = torch.Tensor(matrix_g_2).cuda()
        else:
            self.matrix_low_0 = torch.Tensor(matrix_h_0)
            self.matrix_low_1 = torch.Tensor(matrix_h_1)
            self.matrix_low_2 = torch.Tensor(matrix_h_2)
            self.matrix_high_0 = torch.Tensor(matrix_g_0)
            self.matrix_high_1 = torch.Tensor(matrix_g_1)
            self.matrix_high_2 = torch.Tensor(matrix_g_2)

    def forward(self, LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH):
        """
        :param LLL: the low-frequency component, lfc
        :param LLH: the high-frequency component, hfc_llh
        :param LHL: the high-frequency component, hfc_lhl
        :param LHH: the high-frequency component, hfc_lhh
        :param HLL: the high-frequency component, hfc_hll
        :param HLH: the high-frequency component, hfc_hlh
        :param HHL: the high-frequency component, hfc_hhl
        :param HHH: the high-frequency component, hfc_hhh
        :return: the original 3D input data
        """
        assert len(LLL.size()) == len(LLH.size()) == len(
            LHL.size()) == len(LHH.size()) == 5
        assert len(HLL.size()) == len(HLH.size()) == len(
            HHL.size()) == len(HHH.size()) == 5
        self.input_depth = LLL.size()[-3] + HHH.size()[-3]
        self.input_height = LLL.size()[-2] + HHH.size()[-2]
        self.input_width = LLL.size()[-1] + HHH.size()[-1]
        self.get_matrix()
        return IDWTFunction_3D.apply(LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH,
                                     self.matrix_low_0, self.matrix_low_1, self.matrix_low_2,
                                     self.matrix_high_0, self.matrix_high_1, self.matrix_high_2)
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
if __name__ == '__main__':
    # Round-trip sanity check: 3-level Haar decomposition on a random batch,
    # followed by per-level reconstruction. Both printed differences should
    # be (numerically) zero.
    dwt = DWT_2D("haar")
    iwt = IDWT_2D("haar")
    x = torch.randn(3, 3, 24, 24).cuda()

    approx = x
    pyramid = []
    for _ in range(3):
        approx, lh, hl, hh = dwt(approx)
        pyramid.append([approx, lh, hl, hh])

    # pyramid[-1][0] could be perturbed here to test partial reconstruction.
    for level in reversed(range(2)):
        approx, lh, hl, hh = pyramid[level]
        approx = iwt(approx, lh, hl, hh)
        print(approx.shape)

    print(torch.sum(x - approx))
    print(torch.sum(x - iwt(*pyramid[0])))
|
DWT_IDWT/__init__.py
ADDED
|
File without changes
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2024 Paul Friedrich
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# WDM: 3D Wavelet Diffusion Models for High-Resolution Medical Image Synthesis
|
| 2 |
+
[](https://opensource.org/licenses/MIT)
|
| 3 |
+
[](https://pfriedri.github.io/wdm-3d-io/)
|
| 4 |
+
[](https://arxiv.org/abs/2402.19043)
|
| 5 |
+
|
| 6 |
+
This is the official PyTorch implementation of the paper **WDM: 3D Wavelet Diffusion Models for High-Resolution Medical Image Synthesis** by [Paul Friedrich](https://pfriedri.github.io/), [Julia Wolleb](https://dbe.unibas.ch/en/persons/julia-wolleb/), [Florentin Bieder](https://dbe.unibas.ch/en/persons/florentin-bieder/), [Alicia Durrer](https://dbe.unibas.ch/en/persons/alicia-durrer/) and [Philippe C. Cattin](https://dbe.unibas.ch/en/persons/philippe-claude-cattin/).
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
If you find our work useful, please consider to :star: **star this repository** and :memo: **cite our paper**:
|
| 10 |
+
```bibtex
|
| 11 |
+
@inproceedings{friedrich2024wdm,
|
| 12 |
+
title={Wdm: 3d wavelet diffusion models for high-resolution medical image synthesis},
|
| 13 |
+
author={Friedrich, Paul and Wolleb, Julia and Bieder, Florentin and Durrer, Alicia and Cattin, Philippe C},
|
| 14 |
+
booktitle={MICCAI Workshop on Deep Generative Models},
|
| 15 |
+
pages={11--21},
|
| 16 |
+
year={2024},
|
| 17 |
+
organization={Springer}}
|
| 18 |
+
```
|
| 19 |
+
|
| 20 |
+
## Paper Abstract
|
| 21 |
+
Due to the three-dimensional nature of CT- or MR-scans, generative modeling of medical images is a particularly challenging task. Existing approaches mostly apply patch-wise, slice-wise, or cascaded generation techniques to fit the high-dimensional data into the limited GPU memory. However, these approaches may introduce artifacts and potentially restrict the model's applicability for certain downstream tasks. This work presents WDM, a wavelet-based medical image synthesis framework that applies a diffusion model on wavelet decomposed images. The presented approach is a simple yet effective way of scaling diffusion models to high resolutions and can be trained on a single 40 GB GPU. Experimental results on BraTS and LIDC-IDRI unconditional image generation at a resolution of 128 x 128 x 128 show state-of-the-art image fidelity (FID) and sample diversity (MS-SSIM) scores compared to GANs, Diffusion Models, and Latent Diffusion Models. Our proposed method is the only one capable of generating high-quality images at a resolution of 256 x 256 x 256.
|
| 22 |
+
|
| 23 |
+
<p>
|
| 24 |
+
<img width="750" src="assets/wdm.png"/>
|
| 25 |
+
</p>
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
## Dependencies
|
| 29 |
+
We recommend using a [conda](https://github.com/conda-forge/miniforge#mambaforge) environment to install the required dependencies.
|
| 30 |
+
You can create and activate such an environment called `wdm` by running the following commands:
|
| 31 |
+
```sh
|
| 32 |
+
mamba env create -f environment.yml
|
| 33 |
+
mamba activate wdm
|
| 34 |
+
```
|
| 35 |
+
|
| 36 |
+
## Training & Sampling
|
| 37 |
+
For training a new model or sampling from an already trained one, you can simply adapt and use the script `run.sh`. All relevant hyperparameters for reproducing our results are automatically set when using the correct `MODEL` in the general settings.
|
| 38 |
+
For executing the script, simply use the following command:
|
| 39 |
+
```sh
|
| 40 |
+
bash run.sh
|
| 41 |
+
```
|
| 42 |
+
**Supported settings** (set in `run.sh` file):
|
| 43 |
+
|
| 44 |
+
MODE: `'training'`, `'sampling'`
|
| 45 |
+
|
| 46 |
+
MODEL: `'ours_unet_128'`, `'ours_unet_256'`, `'ours_wnet_128'`, `'ours_wnet_256'`
|
| 47 |
+
|
| 48 |
+
DATASET: `'brats'`, `'lidc-idri'`
|
| 49 |
+
|
| 50 |
+
## Conditional Image Synthesis / Image-to-Image Translation
|
| 51 |
+
To use WDM for conditional image synthesis or paired image-to-image translation check out our repository [pfriedri/cwdm](https://github.com/pfriedri/cwdm) that implements our paper **cWDM: Conditional Wavelet Diffusion Models for Cross-Modality 3D Medical Image Synthesis**.
|
| 52 |
+
|
| 53 |
+
## Pretrained Models
|
| 54 |
+
We released pretrained models on [HuggingFace](https://huggingface.co/pfriedri/wdm-3d).
|
| 55 |
+
|
| 56 |
+
Currently available models:
|
| 57 |
+
- [BraTS 128](https://huggingface.co/pfriedri/wdm-3d/blob/main/brats_unet_128_1200k.pt): BraTS, 128 x 128 x 128, U-Net backbone, 1.2M Iterations
|
| 58 |
+
- [LIDC-IDRI 128](https://huggingface.co/pfriedri/wdm-3d/blob/main/lidc-idri_unet_128_1200k.pt): LIDC-IDRI, 128 x 128 x 128, U-Net backbone, 1.2M Iterations
|
| 59 |
+
|
| 60 |
+
## Data
|
| 61 |
+
To ensure good reproducibility, we trained and evaluated our network on two publicly available datasets:
|
| 62 |
+
* **BRATS 2023: Adult Glioma**, a dataset containing routine clinically-acquired, multi-site multiparametric magnetic resonance imaging (MRI) scans of brain tumor patients. We just used the T1-weighted images for training. The data is available [here](https://www.synapse.org/#!Synapse:syn51514105).
|
| 63 |
+
|
| 64 |
+
* **LIDC-IDRI**, a dataset containing multi-site, thoracic computed tomography (CT) scans of lung cancer patients. The data is available [here](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=1966254).
|
| 65 |
+
|
| 66 |
+
The provided code works for the following data structure (you might need to adapt the `DATA_DIR` variable in `run.sh`):
|
| 67 |
+
```
|
| 68 |
+
data
|
| 69 |
+
└───BRATS
|
| 70 |
+
└───BraTS-GLI-00000-000
|
| 71 |
+
└───BraTS-GLI-00000-000-seg.nii.gz
|
| 72 |
+
└───BraTS-GLI-00000-000-t1c.nii.gz
|
| 73 |
+
└───BraTS-GLI-00000-000-t1n.nii.gz
|
| 74 |
+
└───BraTS-GLI-00000-000-t2f.nii.gz
|
| 75 |
+
└───BraTS-GLI-00000-000-t2w.nii.gz
|
| 76 |
+
└───BraTS-GLI-00001-000
|
| 77 |
+
└───BraTS-GLI-00002-000
|
| 78 |
+
...
|
| 79 |
+
|
| 80 |
+
└───LIDC-IDRI
|
| 81 |
+
└───LIDC-IDRI-0001
|
| 82 |
+
└───preprocessed.nii.gz
|
| 83 |
+
└───LIDC-IDRI-0002
|
| 84 |
+
└───LIDC-IDRI-0003
|
| 85 |
+
...
|
| 86 |
+
```
|
| 87 |
+
We provide a script for preprocessing LIDC-IDRI. Simply run the following command with the correct path to the downloaded DICOM files `DICOM_PATH` and the directory you want to store the processed nifti files `NIFTI_PATH`:
|
| 88 |
+
```sh
|
| 89 |
+
python utils/preproc_lidc-idri.py --dicom_dir DICOM_PATH --nifti_dir NIFTI_PATH
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
## Evaluation
|
| 93 |
+
As our code for evaluating the model performance has slightly different dependencies, we provide a second .yml file to set up the evaluation environment.
|
| 94 |
+
Simply use the following command to create and activate the new environment:
|
| 95 |
+
```sh
|
| 96 |
+
mamba env create -f eval/eval_environment.yml
|
| 97 |
+
mamba activate eval
|
| 98 |
+
```
|
| 99 |
+
### FID
|
| 100 |
+
For computing the FID score, you need to specify the following variables and use them in the command below:
|
| 101 |
+
* DATASET: `brats` or `lidc-idri`
|
| 102 |
+
* IMG_SIZE: `128` or `256`
|
| 103 |
+
* REAL_DATA_DIR: path to your real data
|
| 104 |
+
* FAKE_DATA_DIR: path to your generated/fake data
|
| 105 |
+
* PATH_TO_FEATURE_EXTRACTOR: path to the feature extractor weights, e.g. `./eval/pretrained/resnet_50_23dataset.pt`
|
| 106 |
+
* PATH_TO_ACTIVATIONS: path to the location where you want to save mus and sigmas (in case you want to reuse them), e.g. `./eval/activations/`
|
| 107 |
+
* GPU_ID: gpu you want to use, e.g. `0`
|
| 108 |
+
```sh
|
| 109 |
+
python eval/fid.py --dataset DATASET --img_size IMG_SIZE --data_root_real REAL_DATA_DIR --data_root_fake FAKE_DATA_DIR --pretrain_path PATH_TO_FEATURE_EXTRACTOR --path_to_activations PATH_TO_ACTIVATIONS --gpu_id GPU_ID
|
| 110 |
+
```
|
| 111 |
+
### Mean MS-SSIM
|
| 112 |
+
For computing the mean MS-SSIM, you need to specify the following variables and use them in the command below:
|
| 113 |
+
* DATASET: `brats` or `lidc-idri`
|
| 114 |
+
* IMG_SIZE: `128` or `256`
|
| 115 |
+
* SAMPLE_DIR: path to the generated (or real) data
|
| 116 |
+
|
| 117 |
+
```sh
|
| 118 |
+
python eval/ms_ssim.py --dataset DATASET --img_size IMG_SIZE --sample_dir SAMPLE_DIR
|
| 119 |
+
```
|
| 120 |
+
## Implementation Details for Comparing Methods
|
| 121 |
+
* **HA-GAN**: For implementing the paper [Hierarchical Amortized GAN for 3D High Resolution Medical Image Synthesis](https://ieeexplore.ieee.org/abstract/document/9770375), we use the publicly available [implementation](https://github.com/batmanlab/HA-GAN). We follow the implementation details presented in the original paper (Section E). The authors recommend cutting all zero slices from the volumes before training. To allow a fair comparison with other methods, we have omitted this step.
|
| 122 |
+
* **3D-LDM**: For implementing the paper [Denoising Diffusion Probabilistic Models for 3D Medical Image Generation](https://www.nature.com/articles/s41598-023-34341-2), we use the publicly available [implementation](https://github.com/FirasGit/medicaldiffusion). We follow the implementation details presented in the Supplementary Material of the original paper (Supplementary Table 1).
|
| 123 |
+
* **2.5D-LDM**: For implementing the paper [Make-A-Volume: Leveraging Latent Diffusion Models for Cross-Modality 3D Brain MRI Synthesis](https://link.springer.com/chapter/10.1007/978-3-031-43999-5_56), we adopted the method to work for image generation. We trained a VQ-VAE (downsampling factor 4, latent dimension 32) using an implementation from [MONAI Generative](https://github.com/Project-MONAI/GenerativeModels) and a diffusion model implementation from [OpenAI](https://github.com/openai/guided-diffusion). For implementing the pseudo 3D layers, we use a script provided by the authors. To allow for image generation, we sample all slices at once - meaning that the models batch size and the dimension of the 1D convolution is equal to the number of slices in the volume to be generated.
|
| 124 |
+
* **3D DDPM**: For implementing a memory efficient baseline model, we use the 3D DDPM presented in the paper [Memory-Efficient 3D Denoising Diffusion Models for Medical Image Processing](https://openreview.net/forum?id=neXqIGpO-tn), and used the publicly available [implementation](https://github.com/FlorentinBieder/PatchDDM-3D). We use additive skip connections and train the model with the same hyperparameters as our models.
|
| 125 |
+
|
| 126 |
+
All experiments were performed on a system with an AMD Epyc 7742 CPU and a NVIDIA A100 (40GB) GPU.
|
| 127 |
+
|
| 128 |
+
## TODOs
|
| 129 |
+
We plan to add further functionality to our framework:
|
| 130 |
+
- [ ] Add compatibility for more datasets like MRNet, ADNI, or fastMRI
|
| 131 |
+
- [x] Release pre-trained models
|
| 132 |
+
- [ ] Extend the framework for 3D image inpainting
|
| 133 |
+
- [x] Extend the framework for 3D image-to-image translation ([pfriedri/cwdm](https://github.com/pfriedri/cwdm))
|
| 134 |
+
|
| 135 |
+
## Acknowledgements
|
| 136 |
+
Our code is based on / inspired by the following repositories:
|
| 137 |
+
* https://github.com/openai/guided-diffusion (published under [MIT License](https://github.com/openai/guided-diffusion/blob/main/LICENSE))
|
| 138 |
+
* https://github.com/FlorentinBieder/PatchDDM-3D (published under [MIT License](https://github.com/FlorentinBieder/PatchDDM-3D/blob/master/LICENSE))
|
| 139 |
+
* https://github.com/VinAIResearch/WaveDiff (published under [GNU General Public License v3.0](https://github.com/VinAIResearch/WaveDiff/blob/main/LICENSE))
|
| 140 |
+
* https://github.com/LiQiufu/WaveCNet (published under [CC BY-NC-SA 4.0 License](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode))
|
| 141 |
+
|
| 142 |
+
For computing FID scores we use a pretrained model (`resnet_50_23dataset.pth`) from:
|
| 143 |
+
* https://github.com/Tencent/MedicalNet (published under [MIT License](https://github.com/Tencent/MedicalNet/blob/master/LICENSE))
|
| 144 |
+
|
| 145 |
+
Thanks for making these projects open-source.
|
assets/wdm.png
ADDED
|
Git LFS Details
|
environment.yml
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: wdm
|
| 2 |
+
channels:
|
| 3 |
+
- pytorch
|
| 4 |
+
- nvidia
|
| 5 |
+
- conda-forge
|
| 6 |
+
dependencies:
|
| 7 |
+
- python=3.10.13
|
| 8 |
+
- numpy=1.26.4
|
| 9 |
+
- pytorch=2.2.0=py3.10_cuda11.8_cudnn8.7.0_0
|
| 10 |
+
- pytorch-cuda=11.8
|
| 11 |
+
- pywavelets=1.4.1
|
| 12 |
+
- scipy=1.12.0
|
| 13 |
+
- torchaudio=2.2.0=py310_cu118
|
| 14 |
+
- torchvision=0.17.0=py310_cu118
|
| 15 |
+
- pip
|
| 16 |
+
- pip:
|
| 17 |
+
- nibabel==5.2.0
|
| 18 |
+
- blobfile==2.1.1
|
| 19 |
+
- tensorboard==2.16.2
|
| 20 |
+
- matplotlib==3.8.3
|
| 21 |
+
- tqdm==4.66.2
|
| 22 |
+
- dicom2nifti==2.4.10
|
eval/activations/activations.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
Path to store intermediate activations for computing FID scores.
|
eval/eval_environment.yml
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: eval
|
| 2 |
+
channels:
|
| 3 |
+
- pytorch
|
| 4 |
+
- nvidia
|
| 5 |
+
- conda-forge
|
| 6 |
+
dependencies:
|
| 7 |
+
- numpy=1.24.4
|
| 8 |
+
- pip=24.2
|
| 9 |
+
- python=3.8.19
|
| 10 |
+
- pytorch=2.4.0=py3.8_cuda11.8_cudnn9.1.0_0
|
| 11 |
+
- pytorch-cuda=11.8
|
| 12 |
+
- scipy=1.10.1
|
| 13 |
+
- torchaudio=2.4.0=py38_cu118
|
| 14 |
+
- torchvision=0.19.0=py38_cu118
|
| 15 |
+
- pip:
|
| 16 |
+
- monai==1.3.2
|
| 17 |
+
- monai-generative==0.2.3
|
| 18 |
+
- nibabel==5.2.1
|
| 19 |
+
- tqdm==4.66.5
|
eval/fid.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
from torch.utils.data import DataLoader
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import argparse
|
| 9 |
+
|
| 10 |
+
sys.path.append(".")
|
| 11 |
+
sys.path.append("..")
|
| 12 |
+
|
| 13 |
+
from scipy import linalg
|
| 14 |
+
|
| 15 |
+
from guided_diffusion.bratsloader import BRATSVolumes
|
| 16 |
+
from guided_diffusion.lidcloader import LIDCVolumes
|
| 17 |
+
from model import generate_model
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def get_feature_extractor(sets):
    """Build the 3D-ResNet feature extractor and load its pretrained weights.

    Args:
        sets: parsed options namespace; must provide ``pretrain_path`` plus
            the model-construction options consumed by ``generate_model``.

    Returns:
        The model in eval mode with the checkpoint's ``state_dict`` loaded.
    """
    model, _ = generate_model(sets)
    # map_location='cpu' lets checkpoints saved on GPU load on CPU-only
    # hosts as well; the caller moves the model to the target device later.
    checkpoint = torch.load(sets.pretrain_path, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    print("Done. Initialized feature extraction model and loaded pretrained weights.")

    return model
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def get_activations(model, data_loader, sets):
    """Collect feature activations for up to ``sets.num_samples`` samples.

    Args:
        model: feature extractor; called as ``model(batch)`` under no_grad.
        data_loader: iterable yielding batches (a tensor, or a list whose
            first element is the tensor).
        sets: options namespace providing ``num_samples``, ``dims`` and
            ``batch_size``.

    Returns:
        np.ndarray of shape (num_samples, dims), one row per sample.
    """
    pred_arr = np.empty((sets.num_samples, sets.dims))

    for i, batch in enumerate(data_loader):
        start = i * sets.batch_size
        # Stop before running a forward pass once the target number of
        # samples has been collected.
        if start >= sets.num_samples:
            break
        if isinstance(batch, list):
            batch = batch[0]
        batch = batch.cuda()
        if i % 10 == 0:
            print('\rPropagating batch %d' % i, end='', flush=True)
        with torch.no_grad():
            pred = model(batch)

        # Truncate the final batch so it fits the remaining rows. (The
        # original assigned a full batch into a too-small or empty slice,
        # which raises a numpy broadcasting error when num_samples is not
        # a multiple of batch_size.)
        end = min(start + sets.batch_size, sets.num_samples)
        pred_arr[start:end] = pred.cpu().numpy()[:end - start]

    return pred_arr
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet distance.

    For X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) the squared distance is
    d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Params:
    -- mu1   : mean of the activations for generated samples.
    -- sigma1: covariance of the activations for generated samples.
    -- mu2   : mean of the activations over a representative data set.
    -- sigma2: covariance of the activations over a representative data set.

    Returns:
    -- : The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # The covariance product can be nearly singular; if sqrtm produced
    # non-finite entries, retry with a small ridge on both covariances.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        ridge = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + ridge).dot(sigma2 + ridge))

    # Numerical error can leave a tiny imaginary component behind.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    return (mean_diff.dot(mean_diff) + np.trace(sigma1)
            + np.trace(sigma2) - 2 * np.trace(covmean))
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def process_feature_vecs(activations):
    """Summarize feature vectors as Gaussian statistics.

    Args:
        activations: array of shape (n_samples, n_features).

    Returns:
        Tuple ``(mu, sigma)``: the feature-wise mean vector and the
        feature covariance matrix.
    """
    return np.mean(activations, axis=0), np.cov(activations, rowvar=False)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def parse_opts():
    """Parse command-line options for the FID computation.

    Only the first six (required) options are consumed by fid.py itself;
    the remaining options exist for compatibility with the MedicalNet
    code from which the feature extractor is taken.

    Returns:
        argparse.Namespace with an additional ``save_folder`` attribute.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True, type=str, help='Dataset (brats | lidc-idri)')
    parser.add_argument('--img_size', required=True, type=int, help='Image size')
    parser.add_argument('--data_root_real', required=True, type=str, help='Path to real data')
    parser.add_argument('--data_root_fake', required=True, type=str, help='Path to fake data')
    parser.add_argument('--pretrain_path', required=True, type=str, help='Path to pretrained model')
    parser.add_argument('--path_to_activations', required=True, type=str, help='Path to activations')
    parser.add_argument('--n_seg_classes', default=2, type=int, help="Number of segmentation classes")
    parser.add_argument('--learning_rate', default=0.001, type=float,
                        help='Initial learning rate (divided by 10 while training by lr scheduler)')
    parser.add_argument('--num_workers', default=4, type=int, help='Number of jobs')
    parser.add_argument('--batch_size', default=1, type=int, help='Batch Size')
    parser.add_argument('--phase', default='test', type=str, help='Phase of train or test')
    parser.add_argument('--save_intervals', default=10, type=int, help='Iteration for saving model')
    parser.add_argument('--n_epochs', default=200, type=int, help='Number of total epochs to run')
    parser.add_argument('--input_D', default=256, type=int, help='Input size of depth')
    parser.add_argument('--input_H', default=256, type=int, help='Input size of height')
    parser.add_argument('--input_W', default=256, type=int, help='Input size of width')
    parser.add_argument('--resume_path', default='', type=str, help='Path for resume model.')

    # Fix: the original used ``type=list``, which makes argparse split a
    # CLI value into single characters; nargs='+' collects whole strings.
    parser.add_argument('--new_layer_names', default=['conv_seg'], nargs='+', type=str,
                        help='New layer except for backbone')
    parser.add_argument('--no_cuda', action='store_true', help='If true, cuda is not used.')
    parser.set_defaults(no_cuda=False)
    parser.add_argument('--gpu_id', default=0, type=int, help='Gpu id')
    parser.add_argument('--model', default='resnet', type=str,
                        help='(resnet | preresnet | wideresnet | resnext | densenet | ')
    parser.add_argument('--model_depth', default=50, type=int, help='Depth of resnet (10 | 18 | 34 | 50 | 101)')
    parser.add_argument('--resnet_shortcut', default='B', type=str, help='Shortcut type of resnet (A | B)')
    parser.add_argument('--manual_seed', default=1, type=int, help='Manually set random seed')
    parser.add_argument('--ci_test', action='store_true', help='If true, ci testing is used.')
    args = parser.parse_args()
    args.save_folder = "./trails/models/{}_{}".format(args.model, args.model_depth)

    return args
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
if __name__ == '__main__':
    # Model settings: CLI options plus fixed evaluation defaults.
    sets = parse_opts()
    sets.target_type = "normal"
    sets.phase = 'test'
    sets.batch_size = 1
    sets.dims = 2048           # feature dimension of the ResNet-50 extractor
    sets.num_samples = 1000    # volumes drawn per distribution

    if not sets.no_cuda:
        dev_name = 'cuda:' + str(sets.gpu_id)
        device = torch.device(dev_name)
    else:
        device = torch.device('cpu')

    # Feature extraction model.
    print("Load model ...")
    model = get_feature_extractor(sets)
    model = model.to(device)

    # Data loaders for the real and the generated samples.
    print("Initialize dataloader ...")
    if sets.dataset == 'brats':
        real_data = BRATSVolumes(sets.data_root_real, normalize=None, mode='real', img_size=sets.img_size)
        fake_data = BRATSVolumes(sets.data_root_fake, normalize=None, mode='fake', img_size=sets.img_size)
    elif sets.dataset == 'lidc-idri':
        real_data = LIDCVolumes(sets.data_root_real, normalize=None, mode='real', img_size=sets.img_size)
        fake_data = LIDCVolumes(sets.data_root_fake, normalize=None, mode='fake', img_size=sets.img_size)
    else:
        # Fail fast: the original only printed a message here and then
        # crashed later with a NameError on real_data.
        sys.exit("Dataloader for this dataset is not implemented. Use 'brats' or 'lidc-idri'.")

    # Fix: num_workers was mistakenly set to sets.batch_size before.
    real_data_loader = DataLoader(real_data, batch_size=sets.batch_size, shuffle=False,
                                  num_workers=sets.num_workers, pin_memory=False)
    fake_data_loader = DataLoader(fake_data, batch_size=sets.batch_size, shuffle=False,
                                  num_workers=sets.num_workers, pin_memory=False)

    # Real data: activations -> Gaussian statistics, cached to disk for reuse.
    print("Get activations from real data ...")
    activations_real = get_activations(model, real_data_loader, sets)
    mu_real, sigma_real = process_feature_vecs(activations_real)

    path_to_mu_real = os.path.join(sets.path_to_activations, 'mu_real.npy')
    path_to_sigma_real = os.path.join(sets.path_to_activations, 'sigma_real.npy')
    np.save(path_to_mu_real, mu_real)
    print("")
    print("Saved mu_real to: " + path_to_mu_real)
    np.save(path_to_sigma_real, sigma_real)
    print("Saved sigma_real to: " + path_to_sigma_real)

    # Fake/generated data: same procedure.
    print("Get activations from fake/generated data ...")
    activations_fake = get_activations(model, fake_data_loader, sets)
    mu_fake, sigma_fake = process_feature_vecs(activations_fake)

    path_to_mu_fake = os.path.join(sets.path_to_activations, 'mu_fake.npy')
    path_to_sigma_fake = os.path.join(sets.path_to_activations, 'sigma_fake.npy')
    np.save(path_to_mu_fake, mu_fake)
    print("")
    print("Saved mu_fake to: " + path_to_mu_fake)
    np.save(path_to_sigma_fake, sigma_fake)
    print("Saved sigma_fake to: " + path_to_sigma_fake)

    fid = calculate_frechet_distance(mu_real, sigma_real, mu_fake, sigma_fake)
    print("The FID score is: ")
    print(fid)
|
eval/model.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch import nn
|
| 3 |
+
from models import resnet
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def generate_model(opt):
    """Build the 3D ResNet specified by ``opt``.

    Args:
        opt: options namespace providing model, model_depth, input_D/H/W,
            resnet_shortcut, n_seg_classes, no_cuda, gpu_id, phase,
            pretrain_path and new_layer_names.

    Returns:
        ``(model, parameters)`` — when pretrained weights are loaded
        (non-test phase), ``parameters`` is a dict splitting base and new
        parameters; otherwise it is ``model.parameters()``.
    """
    assert opt.model in ['resnet']

    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]

        # Dispatch table replaces the long if/elif chain; every depth is
        # constructed with the same keyword arguments.
        constructors = {
            10: resnet.resnet10,
            18: resnet.resnet18,
            34: resnet.resnet34,
            50: resnet.resnet50,
            101: resnet.resnet101,
            152: resnet.resnet152,
            200: resnet.resnet200,
        }
        model = constructors[opt.model_depth](
            sample_input_W=opt.input_W,
            sample_input_H=opt.input_H,
            sample_input_D=opt.input_D,
            shortcut_type=opt.resnet_shortcut,
            no_cuda=opt.no_cuda,
            num_seg_classes=opt.n_seg_classes)

    if not opt.no_cuda:
        import os
        os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_id)
        model = model.cuda()
        model = nn.DataParallel(model)
        net_dict = model.state_dict()
    else:
        net_dict = model.state_dict()

    # Load pretrained weights only outside the test phase; fid.py loads
    # its checkpoint separately via get_feature_extractor.
    if opt.phase != 'test' and opt.pretrain_path:
        print('loading pretrained model {}'.format(opt.pretrain_path))
        pretrain = torch.load(opt.pretrain_path)
        pretrain_dict = {k: v for k, v in pretrain['state_dict'].items() if k in net_dict.keys()}
        net_dict.update(pretrain_dict)
        model.load_state_dict(net_dict)

        # Split parameters into the pretrained backbone and freshly added
        # layers so callers can use different learning rates.
        new_parameters = []
        for pname, p in model.named_parameters():
            if any(pname.find(layer_name) >= 0 for layer_name in opt.new_layer_names):
                new_parameters.append(p)

        new_parameter_ids = {id(p) for p in new_parameters}
        base_parameters = [p for p in model.parameters() if id(p) not in new_parameter_ids]
        parameters = {'base_parameters': base_parameters,
                      'new_parameters': new_parameters}

        return model, parameters

    return model, model.parameters()
|
eval/models/resnet.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from torch.autograd import Variable
|
| 5 |
+
import math
|
| 6 |
+
from functools import partial
|
| 7 |
+
|
| 8 |
+
__all__ = [
|
| 9 |
+
'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
|
| 10 |
+
'resnet152', 'resnet200'
|
| 11 |
+
]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def conv3x3x3(in_planes, out_planes, stride=1, dilation=1):
    """3x3x3 convolution without bias, padded by the dilation factor."""
    return nn.Conv3d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=dilation,
                     dilation=dilation, bias=False)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def downsample_basic_block(x, planes, stride, no_cuda=False):
    """Type-A residual shortcut: strided identity with zero-padded channels.

    Spatially subsamples ``x`` with a 1x1x1 average pool of the given
    stride, then pads the channel dimension with zeros up to ``planes``.

    Args:
        x: input tensor of shape (N, C, D, H, W).
        planes: target channel count (must be >= C).
        stride: spatial subsampling factor.
        no_cuda: when True, never move the zero padding to the GPU.

    Returns:
        Tensor of shape (N, planes, D/stride, H/stride, W/stride).
    """
    out = F.avg_pool3d(x, kernel_size=1, stride=stride)
    # torch.zeros replaces the deprecated ``torch.Tensor(...).zero_()`` +
    # ``Variable`` idiom; tying dtype to ``out`` keeps half-precision
    # inputs consistent instead of always padding with float32.
    zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
                            out.size(3), out.size(4), dtype=out.dtype)
    # ``out.is_cuda`` replaces the deprecated
    # ``isinstance(out.data, torch.cuda.FloatTensor)`` check.
    if not no_cuda and out.is_cuda:
        zero_pads = zero_pads.cuda()

    return torch.cat([out, zero_pads], dim=1)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class BasicBlock(nn.Module):
    """Two 3x3x3 convolutions with an identity (or projected) shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3x3(inplanes, planes, stride=stride, dilation=dilation)
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes, dilation=dilation)
        self.bn2 = nn.BatchNorm3d(planes)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        # Shortcut branch: identity unless a downsample module is given.
        shortcut = x if self.downsample is None else self.downsample(x)

        # Main branch: conv -> bn -> relu -> conv -> bn.
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))

        return self.relu(h + shortcut)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (output = planes * 4)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = nn.Conv3d(
            planes, planes, kernel_size=3, stride=stride, dilation=dilation, padding=dilation, bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        # Shortcut branch: identity unless a downsample module is given.
        shortcut = x if self.downsample is None else self.downsample(x)

        # Main branch: reduce channels, transform, expand channels.
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))

        return self.relu(h + shortcut)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class Flatten(torch.nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, inp):
        batch = inp.size(0)
        return inp.view(batch, -1)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class ResNet(nn.Module):
    """3D ResNet backbone used here as a volumetric feature extractor.

    The segmentation head is replaced by global average pooling + flatten
    (``conv_seg``), so ``forward`` returns one feature vector per input
    volume. layer3/layer4 keep stride 1 and use dilation 2/4 instead, so
    the total spatial stride of the network is 8.
    """

    def __init__(self,
                 block,
                 layers,
                 sample_input_D,
                 sample_input_H,
                 sample_input_W,
                 num_seg_classes,
                 shortcut_type='B',
                 no_cuda=False):
        # block: residual block class (BasicBlock or Bottleneck).
        # layers: number of blocks per stage, e.g. [3, 4, 6, 3].
        # NOTE(review): sample_input_D/H/W and num_seg_classes are accepted
        # for signature/checkpoint compatibility but are not used below.
        self.inplanes = 64
        self.no_cuda = no_cuda
        super(ResNet, self).__init__()
        # Stem: single-channel input (grayscale medical volumes).
        self.conv1 = nn.Conv3d(
            1,
            64,
            kernel_size=7,
            stride=(2, 2, 2),
            padding=(3, 3, 3),
            bias=False)

        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
        self.layer2 = self._make_layer(
            block, 128, layers[1], shortcut_type, stride=2)
        # Dilation instead of stride preserves spatial resolution while
        # still growing the receptive field.
        self.layer3 = self._make_layer(
            block, 256, layers[2], shortcut_type, stride=1, dilation=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], shortcut_type, stride=1, dilation=4)

        # Global average pool + flatten -> one feature vector per volume.
        self.conv_seg = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)), Flatten())

        # Kaiming init for convolutions; unit-scale/zero-shift batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, dilation=1):
        """Build one residual stage of ``blocks`` blocks.

        A downsample path is attached to the first block whenever the
        stride or channel count changes: shortcut type 'A' uses the
        parameter-free zero-padding shortcut, anything else a strided
        1x1x1 conv + batch norm projection.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if shortcut_type == 'A':
                downsample = partial(
                    downsample_basic_block,
                    planes=planes * block.expansion,
                    stride=stride,
                    no_cuda=self.no_cuda)
            else:
                downsample = nn.Sequential(
                    nn.Conv3d(
                        self.inplanes,
                        planes * block.expansion,
                        kernel_size=1,
                        stride=stride,
                        bias=False), nn.BatchNorm3d(planes * block.expansion))

        layers = []
        layers.append(block(self.inplanes, planes, stride=stride, dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Map a volume batch (N, 1, D, H, W) to feature vectors (N, C)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.conv_seg(x)

        return x
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def resnet10(**kwargs):
    """Constructs a ResNet-10 model.

    (Fixed: the docstring previously said ResNet-18.)

    :param kwargs: forwarded to the ``ResNet`` constructor.
    """
    model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
    return model
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def resnet18(**kwargs):
    """Constructs a ResNet-18 model.

    :param kwargs: forwarded to the ``ResNet`` constructor.
    """
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def resnet34(**kwargs):
    """Constructs a ResNet-34 model.

    :param kwargs: forwarded to the ``ResNet`` constructor.
    """
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def resnet50(**kwargs):
    """Constructs a ResNet-50 model.

    :param kwargs: forwarded to the ``ResNet`` constructor.
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def resnet101(**kwargs):
    """Constructs a ResNet-101 model.

    :param kwargs: forwarded to the ``ResNet`` constructor.
    """
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def resnet152(**kwargs):
    """Constructs a ResNet-152 model.

    (Fixed: the docstring previously said ResNet-101.)

    :param kwargs: forwarded to the ``ResNet`` constructor.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    return model
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def resnet200(**kwargs):
    """Constructs a ResNet-200 model.

    (Fixed: the docstring previously said ResNet-101.)

    :param kwargs: forwarded to the ``ResNet`` constructor.
    """
    model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
    return model
|
eval/ms_ssim.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
import sys
|
| 5 |
+
|
| 6 |
+
sys.path.append(".")
|
| 7 |
+
sys.path.append("..")
|
| 8 |
+
|
| 9 |
+
from generative.metrics import MultiScaleSSIMMetric
|
| 10 |
+
from monai import transforms
|
| 11 |
+
from monai.config import print_config
|
| 12 |
+
from monai.data import Dataset
|
| 13 |
+
from monai.utils import set_determinism
|
| 14 |
+
from torch.utils.data import DataLoader
|
| 15 |
+
from tqdm import tqdm
|
| 16 |
+
from guided_diffusion.bratsloader import BRATSVolumes
|
| 17 |
+
from guided_diffusion.lidcloader import LIDCVolumes
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def parse_args():
    """Parse and return the command-line arguments for the MS-SSIM script."""
    p = argparse.ArgumentParser()
    p.add_argument("--seed", type=int, default=42, help="Random seed to use.")
    p.add_argument("--sample_dir", type=str, required=True, help="Location of the samples to evaluate.")
    p.add_argument("--num_workers", type=int, default=8, help="Number of loader workers")
    p.add_argument("--dataset", choices=['brats','lidc-idri'], required=True, help="Dataset (brats | lidc-idri)")
    p.add_argument("--img_size", type=int, required=True)
    return p.parse_args()
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def main(args):
    """Compute the mean pairwise MS-SSIM over a directory of generated volumes.

    Every sample is compared against every other sample (quadratic in the
    number of volumes); self-pairs are skipped by comparing the filename
    returned as the second batch element.

    :param args: namespace from ``parse_args`` (seed, sample_dir, dataset,
        img_size, num_workers).
    """
    set_determinism(seed=args.seed)

    # Both loaders iterate the same set of generated ('fake') samples so that
    # all ordered pairs can be formed.
    if args.dataset == 'brats':
        dataset_1 = BRATSVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
        dataset_2 = BRATSVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
    elif args.dataset == 'lidc-idri':
        dataset_1 = LIDCVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
        dataset_2 = LIDCVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)

    dataloader_1 = DataLoader(dataset_1, batch_size=1, shuffle=False, num_workers=args.num_workers)
    dataloader_2 = DataLoader(dataset_2, batch_size=1, shuffle=False, num_workers=args.num_workers)

    device = torch.device("cuda")
    ms_ssim = MultiScaleSSIMMetric(spatial_dims=3, data_range=1.0, kernel_size=7)

    print("Computing MS-SSIM (this takes a while)...")
    ms_ssim_list = []
    pbar = tqdm(enumerate(dataloader_1), total=len(dataloader_1))
    for step, batch in pbar:
        img = batch[0]
        for batch2 in dataloader_2:
            img2 = batch2[0]
            if batch[1] == batch2[1]:
                # Same file on both sides -- skip the trivial self-comparison.
                continue
            ms_ssim_list.append(ms_ssim(img.to(device), img2.to(device)).item())
        # Fix: no manual pbar.update() here. tqdm already advances once per
        # iteration of the wrapped iterator; the extra call made the progress
        # bar run at double speed.

    ms_ssim_list = np.array(ms_ssim_list)
    print("Calculated MS-SSIMs. Computing mean ...")
    print(f"Mean MS-SSIM: {ms_ssim_list.mean():.6f}")
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
if __name__ == "__main__":
    # Script entry point: parse CLI arguments, then run the evaluation.
    main(parse_args())
|
eval/pretrained/pretrained.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Path to store pretrained models.
|
| 2 |
+
We used a pretrained 3D ResNet from: https://github.com/Tencent/MedicalNet
|
| 3 |
+
Pretrained model weights for the model 'resnet_50_23dataset.pth' are available at: https://drive.google.com/file/d/13tnSvXY7oDIEloNFiGTsjUIYfS3g3BfG/view?usp=sharing
|
guided_diffusion/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Codebase for "Diffusion Models for Medical Anomaly Detection".
|
| 3 |
+
"""
|
guided_diffusion/bratsloader.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.utils.data
|
| 4 |
+
import numpy as np
|
| 5 |
+
import os
|
| 6 |
+
import os.path
|
| 7 |
+
import nibabel
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class BRATSVolumes(torch.utils.data.Dataset):
    # Dataset of single-modality (t1n) BRATS volumes, used both for training
    # on real data and for evaluating generated ('fake') samples.

    def __init__(self, directory, test_flag=False, normalize=None, mode='train', img_size=256):
        '''
        directory is expected to contain some folder structure:
        if some subfolder contains only files, all of these
        files are assumed to have a name like
        brats_train_NNN_XXX_123_w.nii.gz
        where XXX is one of t1n, t1c, t2w, t2f, seg
        we assume these five files belong to the same image
        seg is supposed to contain the segmentation
        '''
        super().__init__()
        self.mode = mode
        self.directory = os.path.expanduser(directory)
        # Identity transform when no normalizer is supplied.
        self.normalize = normalize or (lambda x: x)
        self.test_flag = test_flag
        self.img_size = img_size
        if test_flag:
            # Test data has no segmentation channel.
            self.seqtypes = ['t1n', 't1c', 't2w', 't2f']
        else:
            self.seqtypes = ['t1n', 't1c', 't2w', 't2f', 'seg']
        self.seqtypes_set = set(self.seqtypes)
        self.database = []

        if not self.mode == 'fake':  # Used during training and for evaluating real data
            for root, dirs, files in os.walk(self.directory):
                # if there are no subdirs, we have a datadir
                if not dirs:
                    files.sort()
                    datapoint = dict()
                    # extract all files as channels
                    for f in files:
                        # NOTE(review): assumes filenames contain at least five
                        # dash-separated fields with the sequence type fifth
                        # (e.g. '...-...-...-...-t1n.nii.gz') -- confirm naming.
                        seqtype = f.split('-')[4].split('.')[0]
                        datapoint[seqtype] = os.path.join(root, f)
                    self.database.append(datapoint)
        else:  # Used for evaluating fake data
            # Every file found is treated as one generated t1n volume.
            for root, dirs, files in os.walk(self.directory):
                for f in files:
                    datapoint = dict()
                    datapoint['t1n'] = os.path.join(root, f)
                    self.database.append(datapoint)

    def __getitem__(self, x):
        # Load one volume; returns (image, name) in 'fake' mode, else image only.
        filedict = self.database[x]
        name = filedict['t1n']
        nib_img = nibabel.load(name)  # We only use t1 weighted images
        out = nib_img.get_fdata()

        if not self.mode == 'fake':
            # CLip and normalize the images (percentile clip, then min-max to [0, 1]).
            out_clipped = np.clip(out, np.quantile(out, 0.001), np.quantile(out, 0.999))
            out_normalized = (out_clipped - np.min(out_clipped)) / (np.max(out_clipped) - np.min(out_clipped))
            out = torch.tensor(out_normalized)

            # Zero pad images
            # NOTE(review): padding offsets assume raw BRATS volumes of
            # 240x240x155 voxels -- confirm against the data.
            image = torch.zeros(1, 256, 256, 256)
            image[:, 8:-8, 8:-8, 50:-51] = out

            # Downsampling
            if self.img_size == 128:
                downsample = nn.AvgPool3d(kernel_size=2, stride=2)
                image = downsample(image)
        else:
            # Generated samples are loaded as-is (no clipping/padding).
            image = torch.tensor(out, dtype=torch.float32)
            image = image.unsqueeze(dim=0)

        # Normalization
        image = self.normalize(image)

        if self.mode == 'fake':
            return image, name
        else:
            return image

    def __len__(self):
        # One entry per discovered volume.
        return len(self.database)
|
guided_diffusion/dist_util.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Helpers for distributed training.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import io
|
| 6 |
+
import os
|
| 7 |
+
import socket
|
| 8 |
+
|
| 9 |
+
import blobfile as bf
|
| 10 |
+
import torch as th
|
| 11 |
+
import torch.distributed as dist
|
| 12 |
+
|
| 13 |
+
# Change this to reflect your cluster layout.
|
| 14 |
+
# The GPU for a given rank is (rank % GPUS_PER_NODE).
|
| 15 |
+
GPUS_PER_NODE = 8
|
| 16 |
+
|
| 17 |
+
SETUP_RETRY_COUNT = 3
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def setup_dist(devices=(0,)):
    """
    Setup a distributed process group.

    Configures a single-node, single-rank (world size 1) group and calls
    ``dist.init_process_group``. The original MPI-based rank discovery is
    commented out; rank/world-size are hard-coded.

    :param devices: GPU index or iterable of indices to expose via
        CUDA_VISIBLE_DEVICES.
    """
    if dist.is_initialized():
        # Already set up -- make the call idempotent.
        return
    try:
        device_string = ','.join(map(str, devices))
    except TypeError:
        # `devices` is a single int rather than an iterable.
        device_string = str(devices)
    os.environ["CUDA_VISIBLE_DEVICES"] = device_string #f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"

    #comm = MPI.COMM_WORLD
    # print('commworld, 'f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}", comm)
    backend = "gloo" if not th.cuda.is_available() else "nccl"
    # print('commrank', comm.rank)
    # print('commsize', comm.size)

    if backend == "gloo":
        hostname = "localhost"
    else:
        hostname = socket.gethostbyname(socket.getfqdn())
    # NOTE(review): `hostname` is computed but unused -- MASTER_ADDR is
    # hard-coded below.
    os.environ["MASTER_ADDR"] = '127.0.1.1'#comm.bcast(hostname, root=0)
    os.environ["RANK"] = '0'#str(comm.rank)
    os.environ["WORLD_SIZE"] = '1'#str(comm.size)

    # Grab a free TCP port for the rendezvous. NOTE(review): the port is
    # released before init_process_group binds it, so another process could
    # steal it in between -- benign for single-process use.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))
    s.listen(1)
    port = s.getsockname()[1]
    s.close()
    # print('port2', port)
    os.environ["MASTER_PORT"] = str(port)
    dist.init_process_group(backend=backend, init_method="env://")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def dev(device_number=0):
    """
    Get the device to use for torch.distributed.

    Accepts a single index or a list/tuple of indices (in which case a list
    of devices is returned). Falls back to CPU when CUDA is unavailable.
    """
    if isinstance(device_number, (list, tuple)):  # multiple devices specified
        return [dev(k) for k in device_number]  # recursive call
    if not th.cuda.is_available():
        return th.device("cpu")
    device_count = th.cuda.device_count()
    if device_count == 1:
        # Single GPU: no need for an explicit index.
        return th.device(f"cuda")
    if device_number < device_count:  # if we specify multiple devices, we have to be specific
        return th.device(f'cuda:{device_number}')
    raise ValueError(f'requested device number {device_number} (0-indexed) but only {device_count} devices available')
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def load_state_dict(path, **kwargs):
    """
    Load a PyTorch file without redundant fetches across MPI ranks.

    MPI broadcasting is disabled; the rank is hard-coded to 0, so every
    caller reads the file directly via blobfile.

    :param path: path/URI readable by ``blobfile.BlobFile``.
    :param kwargs: forwarded to ``th.load`` (e.g. ``map_location``).
    :return: the deserialized object (typically a state dict).
    """
    #print('mpicommworldgetrank', MPI.COMM_WORLD.Get_rank())
    mpigetrank=0
    # if MPI.COMM_WORLD.Get_rank() == 0:
    if mpigetrank==0:
        with bf.BlobFile(path, "rb") as f:
            data = f.read()
    else:
        # Dead branch while mpigetrank is fixed at 0; if ever reached it
        # would leave data=None and crash in th.load below.
        data = None
    # data = MPI.COMM_WORLD.bcast(data)
    # print('mpibacst', MPI.COMM_WORLD.bcast(data))
    return th.load(io.BytesIO(data), **kwargs)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def sync_params(params):
    """
    Synchronize a sequence of Tensors across ranks from rank 0.

    Intentionally a no-op: this codebase runs with world size 1, so no
    broadcast is required. The original distributed implementation is kept
    below for reference.
    """
    # for p in params:
    #     with th.no_grad():
    #         dist.broadcast(p, 0)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _find_free_port():
|
| 101 |
+
try:
|
| 102 |
+
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
| 103 |
+
s.bind(("", 0))
|
| 104 |
+
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
| 105 |
+
return s.getsockname()[1]
|
| 106 |
+
finally:
|
| 107 |
+
s.close()
|
guided_diffusion/gaussian_diffusion.py
ADDED
|
@@ -0,0 +1,1222 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This code started out as a PyTorch port of Ho et al's diffusion models:
|
| 3 |
+
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
|
| 4 |
+
|
| 5 |
+
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
|
| 6 |
+
"""
|
| 7 |
+
from PIL import Image
|
| 8 |
+
from torch.autograd import Variable
|
| 9 |
+
import enum
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
from torchvision.utils import save_image
|
| 12 |
+
import torch
|
| 13 |
+
import math
|
| 14 |
+
import numpy as np
|
| 15 |
+
import torch as th
|
| 16 |
+
from .train_util import visualize
|
| 17 |
+
from .nn import mean_flat
|
| 18 |
+
from .losses import normal_kl, discretized_gaussian_log_likelihood
|
| 19 |
+
from scipy import ndimage
|
| 20 |
+
from torchvision import transforms
|
| 21 |
+
import matplotlib.pyplot as plt
|
| 22 |
+
from scipy.interpolate import interp1d
|
| 23 |
+
|
| 24 |
+
from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
|
| 25 |
+
|
| 26 |
+
dwt = DWT_3D('haar')
|
| 27 |
+
idwt = IDWT_3D('haar')
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.

    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps.
    Beta schedules may be added, but should not be removed or changed once
    they are committed to maintain backwards compatibility.
    """
    if schedule_name == "linear":
        # Ho et al.'s linear schedule, rescaled so that any number of steps
        # spans the same beta range as the original 1000-step schedule.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(
            scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64
        )
    if schedule_name == "cosine":
        # Nichol & Dhariwal's cosine alpha-bar schedule.
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta.
    return np.array([
        min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
        for i in range(steps)
    ])
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.

    Stored as ``GaussianDiffusion.model_mean_type`` and used to interpret
    the raw network output.
    """

    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon (the added noise)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """

    LEARNED = enum.auto()  # model emits the log-variance directly (see p_mean_variance)
    FIXED_SMALL = enum.auto()
    FIXED_LARGE = enum.auto()
    # Model emits values in [-1, 1] interpolating between the clipped
    # posterior log-variance and log(beta_t).
    LEARNED_RANGE = enum.auto()
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class LossType(enum.Enum):
    """Loss functions available for training the diffusion model."""

    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = (
        enum.auto()
    )  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB

    def is_vb(self):
        """Return True when this loss is a variational-bound variant."""
        return self in (LossType.KL, LossType.RESCALED_KL)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class GaussianDiffusion:
|
| 114 |
+
"""
|
| 115 |
+
Utilities for training and sampling diffusion models.
|
| 116 |
+
|
| 117 |
+
Ported directly from here, and then adapted over time to further experimentation.
|
| 118 |
+
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
|
| 119 |
+
|
| 120 |
+
:param betas: a 1-D numpy array of betas for each diffusion timestep,
|
| 121 |
+
starting at T and going to 1.
|
| 122 |
+
:param model_mean_type: a ModelMeanType determining what the model outputs.
|
| 123 |
+
:param model_var_type: a ModelVarType determining how variance is output.
|
| 124 |
+
:param loss_type: a LossType determining the loss function to use.
|
| 125 |
+
:param rescale_timesteps: if True, pass floating point timesteps into the
|
| 126 |
+
model so that they are always scaled like in the
|
| 127 |
+
original paper (0 to 1000).
|
| 128 |
+
"""
|
| 129 |
+
|
| 130 |
+
def __init__(
|
| 131 |
+
self,
|
| 132 |
+
*,
|
| 133 |
+
betas,
|
| 134 |
+
model_mean_type,
|
| 135 |
+
model_var_type,
|
| 136 |
+
loss_type,
|
| 137 |
+
rescale_timesteps=False,
|
| 138 |
+
mode='default',
|
| 139 |
+
loss_level='image'
|
| 140 |
+
):
|
| 141 |
+
self.model_mean_type = model_mean_type
|
| 142 |
+
self.model_var_type = model_var_type
|
| 143 |
+
self.loss_type = loss_type
|
| 144 |
+
self.rescale_timesteps = rescale_timesteps
|
| 145 |
+
self.mode = mode
|
| 146 |
+
self.loss_level=loss_level
|
| 147 |
+
|
| 148 |
+
# Use float64 for accuracy.
|
| 149 |
+
betas = np.array(betas, dtype=np.float64)
|
| 150 |
+
self.betas = betas
|
| 151 |
+
assert len(betas.shape) == 1, "betas must be 1-D"
|
| 152 |
+
assert (betas > 0).all() and (betas <= 1).all()
|
| 153 |
+
|
| 154 |
+
self.num_timesteps = int(betas.shape[0])
|
| 155 |
+
|
| 156 |
+
alphas = 1.0 - betas
|
| 157 |
+
self.alphas_cumprod = np.cumprod(alphas, axis=0) # t
|
| 158 |
+
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1]) # t-1
|
| 159 |
+
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0) # t+1
|
| 160 |
+
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
|
| 161 |
+
|
| 162 |
+
# calculations for diffusion q(x_t | x_{t-1}) and others
|
| 163 |
+
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
|
| 164 |
+
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
|
| 165 |
+
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
|
| 166 |
+
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
|
| 167 |
+
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
|
| 168 |
+
|
| 169 |
+
# calculations for posterior q(x_{t-1} | x_t, x_0)
|
| 170 |
+
self.posterior_variance = (
|
| 171 |
+
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
|
| 172 |
+
)
|
| 173 |
+
# log calculation clipped because the posterior variance is 0 at the
|
| 174 |
+
# beginning of the diffusion chain.
|
| 175 |
+
self.posterior_log_variance_clipped = np.log(
|
| 176 |
+
np.append(self.posterior_variance[1], self.posterior_variance[1:])
|
| 177 |
+
)
|
| 178 |
+
self.posterior_mean_coef1 = (
|
| 179 |
+
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
|
| 180 |
+
)
|
| 181 |
+
self.posterior_mean_coef2 = (
|
| 182 |
+
(1.0 - self.alphas_cumprod_prev)
|
| 183 |
+
* np.sqrt(alphas)
|
| 184 |
+
/ (1.0 - self.alphas_cumprod)
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
def q_mean_variance(self, x_start, t):
|
| 188 |
+
"""
|
| 189 |
+
Get the distribution q(x_t | x_0).
|
| 190 |
+
|
| 191 |
+
:param x_start: the [N x C x ...] tensor of noiseless inputs.
|
| 192 |
+
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
|
| 193 |
+
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
|
| 194 |
+
"""
|
| 195 |
+
mean = (
|
| 196 |
+
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
|
| 197 |
+
)
|
| 198 |
+
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
|
| 199 |
+
log_variance = _extract_into_tensor(
|
| 200 |
+
self.log_one_minus_alphas_cumprod, t, x_start.shape
|
| 201 |
+
)
|
| 202 |
+
return mean, variance, log_variance
|
| 203 |
+
|
| 204 |
+
def q_sample(self, x_start, t, noise=None):
|
| 205 |
+
"""
|
| 206 |
+
Diffuse the data for a given number of diffusion steps.
|
| 207 |
+
|
| 208 |
+
In other words, sample from q(x_t | x_0).
|
| 209 |
+
|
| 210 |
+
:param x_start: the initial data batch.
|
| 211 |
+
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
|
| 212 |
+
:param noise: if specified, the split-out normal noise.
|
| 213 |
+
:return: A noisy version of x_start.
|
| 214 |
+
"""
|
| 215 |
+
if noise is None:
|
| 216 |
+
noise = th.randn_like(x_start)
|
| 217 |
+
assert noise.shape == x_start.shape
|
| 218 |
+
return (
|
| 219 |
+
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
|
| 220 |
+
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
|
| 221 |
+
* noise
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
def q_posterior_mean_variance(self, x_start, x_t, t):
|
| 225 |
+
"""
|
| 226 |
+
Compute the mean and variance of the diffusion posterior:
|
| 227 |
+
|
| 228 |
+
q(x_{t-1} | x_t, x_0)
|
| 229 |
+
|
| 230 |
+
"""
|
| 231 |
+
|
| 232 |
+
assert x_start.shape == x_t.shape
|
| 233 |
+
posterior_mean = (
|
| 234 |
+
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
|
| 235 |
+
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
|
| 236 |
+
)
|
| 237 |
+
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
|
| 238 |
+
posterior_log_variance_clipped = _extract_into_tensor(
|
| 239 |
+
self.posterior_log_variance_clipped, t, x_t.shape
|
| 240 |
+
)
|
| 241 |
+
assert (
|
| 242 |
+
posterior_mean.shape[0]
|
| 243 |
+
== posterior_variance.shape[0]
|
| 244 |
+
== posterior_log_variance_clipped.shape[0]
|
| 245 |
+
== x_start.shape[0]
|
| 246 |
+
)
|
| 247 |
+
return posterior_mean, posterior_variance, posterior_log_variance_clipped
|
| 248 |
+
|
| 249 |
+
    def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
        """
        Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
        the initial x, x_0.
        :param model: the model, which takes a signal and a batch of timesteps
                      as input.
        :param x: the [N x C x ...] tensor at time t.
        :param t: a 1-D Tensor of timesteps.
        :param clip_denoised: if True, clip the denoised signal into [-1, 1].
            Clipping here happens in image space: the 8 wavelet subbands are
            inverse-transformed, clamped, and transformed back (see
            process_xstart below).
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample. Applies before
            clip_denoised.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict with the following keys:
                 - 'mean': the model mean output.
                 - 'variance': the model variance output.
                 - 'log_variance': the log of 'variance'.
                 - 'pred_xstart': the prediction for x_0.
        """
        if model_kwargs is None:
            model_kwargs = {}

        B, C = x.shape[:2]

        assert t.shape == (B,)
        model_output = model(x, self._scale_timesteps(t), **model_kwargs)

        # When the variance is learned, the network emits 2*C channels:
        # C for the mean prediction and C for the variance values.
        if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
            assert model_output.shape == (B, C * 2, *x.shape[2:])
            model_output, model_var_values = th.split(model_output, C, dim=1)
            if self.model_var_type == ModelVarType.LEARNED:
                model_log_variance = model_var_values
                model_variance = th.exp(model_log_variance)
            else:
                # LEARNED_RANGE: the network outputs an interpolation factor
                # between the clipped posterior log-variance (min) and the
                # beta log-variance (max).
                min_log = _extract_into_tensor(
                    self.posterior_log_variance_clipped, t, x.shape
                )
                max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
                # The model_var_values is [-1, 1] for [min_var, max_var].
                frac = (model_var_values + 1) / 2
                model_log_variance = frac * max_log + (1 - frac) * min_log
                model_variance = th.exp(model_log_variance)
        else:
            model_variance, model_log_variance = {
                # for fixedlarge, we set the initial (log-)variance like so
                # to get a better decoder log likelihood.
                ModelVarType.FIXED_LARGE: (
                    np.append(self.posterior_variance[1], self.betas[1:]),
                    np.log(np.append(self.posterior_variance[1], self.betas[1:])),
                ),
                ModelVarType.FIXED_SMALL: (
                    self.posterior_variance,
                    self.posterior_log_variance_clipped,
                ),
            }[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)

        def process_xstart(x):
            # Post-process the x_0 prediction. Clipping is done by inverse
            # 3D wavelet transform (8 subbands -> one volume), clamping to
            # [-1, 1], then re-transforming to wavelet space.
            if denoised_fn is not None:
                x = denoised_fn(x)
            if clip_denoised:
                B, _, H, W, D = x.size()
                # channel 0 is the LLL (low-pass) band, stored scaled by 1/3
                # elsewhere in this file, hence the * 3. here.
                x_idwt = idwt(x[:, 0, :, :, :].view(B, 1, H, W, D) * 3.,
                              x[:, 1, :, :, :].view(B, 1, H, W, D),
                              x[:, 2, :, :, :].view(B, 1, H, W, D),
                              x[:, 3, :, :, :].view(B, 1, H, W, D),
                              x[:, 4, :, :, :].view(B, 1, H, W, D),
                              x[:, 5, :, :, :].view(B, 1, H, W, D),
                              x[:, 6, :, :, :].view(B, 1, H, W, D),
                              x[:, 7, :, :, :].view(B, 1, H, W, D))

                x_idwt_clamp = x_idwt.clamp(-1, 1)

                LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(x_idwt_clamp)
                x = th.cat([LLL / 3., LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1)

                return x
            return x

        if self.model_mean_type == ModelMeanType.PREVIOUS_X:
            # the network predicts x_{t-1} directly
            pred_xstart = process_xstart(
                self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
            )
            model_mean = model_output
        elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
            if self.model_mean_type == ModelMeanType.START_X:
                pred_xstart = process_xstart(model_output)
            else:
                # the network predicts the noise eps; recover x_0 from it
                pred_xstart = process_xstart(
                    self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
                )
            model_mean, _, _ = self.q_posterior_mean_variance(
                x_start=pred_xstart, x_t=x, t=t
            )
        else:
            raise NotImplementedError(self.model_mean_type)

        assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)


        return {
            "mean": model_mean,
            "variance": model_variance,
            "log_variance": model_log_variance,
            "pred_xstart": pred_xstart,
        }
|
| 357 |
+
|
| 358 |
+
def _predict_xstart_from_eps(self, x_t, t, eps):
|
| 359 |
+
assert x_t.shape == eps.shape
|
| 360 |
+
return (
|
| 361 |
+
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
|
| 362 |
+
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
|
| 363 |
+
)
|
| 364 |
+
|
| 365 |
+
def _predict_xstart_from_xprev(self, x_t, t, xprev):
|
| 366 |
+
assert x_t.shape == xprev.shape
|
| 367 |
+
return ( # (xprev - coef2*x_t) / coef1
|
| 368 |
+
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
|
| 369 |
+
- _extract_into_tensor(
|
| 370 |
+
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
|
| 371 |
+
)
|
| 372 |
+
* x_t
|
| 373 |
+
)
|
| 374 |
+
|
| 375 |
+
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
|
| 376 |
+
if self.mode == 'segmentation':
|
| 377 |
+
x_t = x_t[:, -pred_xstart.shape[1]:, ...]
|
| 378 |
+
assert pred_xstart.shape == x_t.shape
|
| 379 |
+
eps = (
|
| 380 |
+
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
|
| 381 |
+
- pred_xstart
|
| 382 |
+
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
|
| 383 |
+
return eps
|
| 384 |
+
|
| 385 |
+
def _scale_timesteps(self, t):
|
| 386 |
+
if self.rescale_timesteps:
|
| 387 |
+
return t.float() * (1000.0 / self.num_timesteps)
|
| 388 |
+
return t
|
| 389 |
+
|
| 390 |
+
def condition_mean(self, cond_fn, p_mean_var, x, t, update=None, model_kwargs=None):
|
| 391 |
+
"""
|
| 392 |
+
Compute the mean for the previous step, given a function cond_fn that
|
| 393 |
+
computes the gradient of a conditional log probability with respect to
|
| 394 |
+
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
|
| 395 |
+
condition on y.
|
| 396 |
+
|
| 397 |
+
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
|
| 398 |
+
"""
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
if update is not None:
|
| 402 |
+
print('CONDITION MEAN UPDATE NOT NONE')
|
| 403 |
+
|
| 404 |
+
new_mean = (
|
| 405 |
+
p_mean_var["mean"].detach().float() + p_mean_var["variance"].detach() * update.float()
|
| 406 |
+
)
|
| 407 |
+
a=update
|
| 408 |
+
|
| 409 |
+
else:
|
| 410 |
+
a, gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
|
| 411 |
+
new_mean = (
|
| 412 |
+
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
return a, new_mean
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
    def condition_score2(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """
        Compute what the p_mean_variance output would have been, should the
        model's score function be conditioned by cond_fn.
        See condition_mean() for details on cond_fn.
        Unlike condition_mean(), this instead uses the conditioning strategy
        from Song et al (2020).

        :return: a tuple (out, cfn) where out is the updated p_mean_variance
            dict and cfn is the gradient tensor returned by cond_fn.
        """
        t=t.long()
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)

        # shift the implied noise prediction by the conditioning gradient
        eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
        # cond_fn returns a pair; the first element is unused here
        a, cfn= cond_fn(
            x, self._scale_timesteps(t).long(), **model_kwargs
        )
        eps = eps - (1 - alpha_bar).sqrt() * cfn

        # recompute x_0 and the posterior mean from the conditioned eps
        out = p_mean_var.copy()
        out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
        out["mean"], _, _ = self.q_posterior_mean_variance(
            x_start=out["pred_xstart"], x_t=x, t=t
        )
        return out, cfn
|
| 442 |
+
|
| 443 |
+
def sample_known(self, img, batch_size = 1):
|
| 444 |
+
image_size = self.image_size
|
| 445 |
+
channels = self.channels
|
| 446 |
+
return self.p_sample_loop_known(model,(batch_size, channels, image_size, image_size), img)
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def p_sample_loop(
|
| 450 |
+
self,
|
| 451 |
+
model,
|
| 452 |
+
shape,
|
| 453 |
+
noise=None,
|
| 454 |
+
clip_denoised=True,
|
| 455 |
+
denoised_fn=None,
|
| 456 |
+
cond_fn=None,
|
| 457 |
+
model_kwargs=None,
|
| 458 |
+
device=None,
|
| 459 |
+
progress=True,
|
| 460 |
+
):
|
| 461 |
+
"""
|
| 462 |
+
Generate samples from the model.
|
| 463 |
+
|
| 464 |
+
:param model: the model module.
|
| 465 |
+
:param shape: the shape of the samples, (N, C, H, W).
|
| 466 |
+
:param noise: if specified, the noise from the encoder to sample.
|
| 467 |
+
Should be of the same shape as `shape`.
|
| 468 |
+
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
|
| 469 |
+
:param denoised_fn: if not None, a function which applies to the
|
| 470 |
+
x_start prediction before it is used to sample.
|
| 471 |
+
:param cond_fn: if not None, this is a gradient function that acts
|
| 472 |
+
similarly to the model.
|
| 473 |
+
:param model_kwargs: if not None, a dict of extra keyword arguments to
|
| 474 |
+
pass to the model. This can be used for conditioning.
|
| 475 |
+
:param device: if specified, the device to create the samples on.
|
| 476 |
+
If not specified, use a model parameter's device.
|
| 477 |
+
:param progress: if True, show a tqdm progress bar.
|
| 478 |
+
:return: a non-differentiable batch of samples.
|
| 479 |
+
"""
|
| 480 |
+
final = None
|
| 481 |
+
for sample in self.p_sample_loop_progressive(
|
| 482 |
+
model,
|
| 483 |
+
shape,
|
| 484 |
+
noise=noise,
|
| 485 |
+
clip_denoised=clip_denoised,
|
| 486 |
+
denoised_fn=denoised_fn,
|
| 487 |
+
cond_fn=cond_fn,
|
| 488 |
+
model_kwargs=model_kwargs,
|
| 489 |
+
device=device,
|
| 490 |
+
progress=progress,
|
| 491 |
+
):
|
| 492 |
+
final = sample
|
| 493 |
+
return final["sample"]
|
| 494 |
+
|
| 495 |
+
def p_sample(
|
| 496 |
+
self,
|
| 497 |
+
model,
|
| 498 |
+
x,
|
| 499 |
+
t,
|
| 500 |
+
clip_denoised=True,
|
| 501 |
+
denoised_fn=None,
|
| 502 |
+
cond_fn=None,
|
| 503 |
+
model_kwargs=None,
|
| 504 |
+
):
|
| 505 |
+
"""
|
| 506 |
+
Sample x_{t-1} from the model at the given timestep.
|
| 507 |
+
:param model: the model to sample from.
|
| 508 |
+
:param x: the current tensor at x_{t-1}.
|
| 509 |
+
:param t: the value of t, starting at 0 for the first diffusion step.
|
| 510 |
+
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
|
| 511 |
+
:param denoised_fn: if not None, a function which applies to the
|
| 512 |
+
x_start prediction before it is used to sample.
|
| 513 |
+
:param cond_fn: if not None, this is a gradient function that acts
|
| 514 |
+
similarly to the model.
|
| 515 |
+
:param model_kwargs: if not None, a dict of extra keyword arguments to
|
| 516 |
+
pass to the model. This can be used for conditioning.
|
| 517 |
+
:return: a dict containing the following keys:
|
| 518 |
+
- 'sample': a random sample from the model.
|
| 519 |
+
- 'pred_xstart': a prediction of x_0.
|
| 520 |
+
"""
|
| 521 |
+
out = self.p_mean_variance(
|
| 522 |
+
model,
|
| 523 |
+
x,
|
| 524 |
+
t,
|
| 525 |
+
clip_denoised=clip_denoised,
|
| 526 |
+
denoised_fn=denoised_fn,
|
| 527 |
+
model_kwargs=model_kwargs,
|
| 528 |
+
)
|
| 529 |
+
noise = th.randn_like(x)
|
| 530 |
+
nonzero_mask = (
|
| 531 |
+
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
|
| 532 |
+
) # no noise when t == 0
|
| 533 |
+
if cond_fn is not None:
|
| 534 |
+
out["mean"] = self.condition_mean(
|
| 535 |
+
cond_fn, out, x, t, model_kwargs=model_kwargs
|
| 536 |
+
)
|
| 537 |
+
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
|
| 538 |
+
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
|
| 539 |
+
|
| 540 |
+
    def p_sample_loop_known(
        self,
        model,
        shape,
        img,
        org=None,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        noise_level=500,
        progress=False,
        classifier=None
    ):
        """
        Noise a known image to a fixed timestep, then denoise it with the
        ancestral sampler.

        :param img: an indexable batch; only img[0] is used. Its first 4
            channels are noised; the remaining channels are passed through
            unchanged (presumably conditioning channels — TODO confirm).
        :param noise_level: forwarded to p_sample_loop_progressive as the
            number of denoising steps. NOTE(review): the forward noising below
            always uses t = 499 regardless of this value — confirm intended.
        :return: (final sample, the noised input, the image actually used).
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        b = shape[0]


        # hard-coded noising timestep: randint(499, 500) is always 499
        t = th.randint(499,500, (b,), device=device).long().to(device)

        org=img[0].to(device)
        img=img[0].to(device)
        # NOTE(review): `indices` is never used below; range() of a tensor
        # only works when it has a single element (b == 1).
        indices = list(range(t))[::-1]
        noise = th.randn_like(img[:, :4, ...]).to(device)
        x_noisy = self.q_sample(x_start=img[:, :4, ...], t=t, noise=noise).to(device)
        # NOTE(review): uses `torch` while the rest of the file uses the `th`
        # alias — verify `torch` is imported at module level.
        x_noisy = torch.cat((x_noisy, img[:, 4:, ...]), dim=1)


        # NOTE(review): p_sample_loop_progressive (as defined in this file)
        # accepts neither `org` nor `classifier` keyword arguments — confirm
        # this code path is actually exercised.
        for sample in self.p_sample_loop_progressive(
            model,
            shape,
            time=noise_level,
            noise=x_noisy,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            cond_fn=cond_fn,
            org=org,
            model_kwargs=model_kwargs,
            device=device,
            progress=progress,
            classifier=classifier
        ):
            final = sample

        return final["sample"], x_noisy, img
|
| 589 |
+
|
| 590 |
+
    def p_sample_loop_interpolation(
        self,
        model,
        shape,
        img1,
        img2,
        lambdaint,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
    ):
        """
        Interpolate two images in noise space and denoise the blend.

        Both images are noised to t = 299 with the *same* noise tensor, mixed
        as lambdaint * x1 + (1 - lambdaint) * x2, then denoised with the
        ancestral sampler.

        :return: (final sample, the blended noisy input, img1, img2).
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        b = shape[0]
        # fixed interpolation timestep: randint(299, 300) is always 299
        t = th.randint(299,300, (b,), device=device).long().to(device)
        # NOTE(review): uses `torch` while the rest of the file uses the `th`
        # alias — verify `torch` is imported at module level.
        img1=torch.tensor(img1).to(device)
        img2 = torch.tensor(img2).to(device)
        noise = th.randn_like(img1).to(device)
        x_noisy1 = self.q_sample(x_start=img1, t=t, noise=noise).to(device)
        x_noisy2 = self.q_sample(x_start=img2, t=t, noise=noise).to(device)
        interpol=lambdaint*x_noisy1+(1-lambdaint)*x_noisy2
        # NOTE(review): time=t passes a tensor where downstream code calls
        # list(range(time)); this only works when b == 1.
        for sample in self.p_sample_loop_progressive(
            model,
            shape,
            time=t,
            noise=interpol,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            cond_fn=cond_fn,
            model_kwargs=model_kwargs,
            device=device,
            progress=progress,
        ):
            final = sample
        return final["sample"], interpol, img1, img2
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
    def p_sample_loop_progressive(
        self,
        model,
        shape,
        time=1000,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=True,
    ):
        """
        Generate samples from the model and yield intermediate samples from
        each timestep of diffusion.

        Arguments are the same as p_sample_loop().
        Returns a generator over dicts, where each dict is the return value of
        p_sample().

        Inpainting mode is enabled by supplying both 'context' and 'mask' in
        model_kwargs; known pixels are re-imposed from the context at every
        step and the mask is appended as an extra model input channel.
        """

        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        use_inpaint = model_kwargs is not None and 'context' in model_kwargs and 'mask' in model_kwargs
        if noise is not None:
            img = noise
        else:
            img = th.randn(*shape, device=device)
            if use_inpaint:
                # keep known pixels; noise only where the mask is set
                img = model_kwargs['context'] + img * model_kwargs['mask']

        indices = list(range(time))[::-1]
        if progress:
            # Lazy import so that we don't depend on tqdm.
            from tqdm.auto import tqdm
            indices = tqdm(indices)

        for i in indices:
            t = th.tensor([i] * shape[0], device=device)
            with th.no_grad():
                cur = img
                if use_inpaint:
                    # re-impose the context, then append the mask channel
                    cur = model_kwargs['context'] + img * model_kwargs['mask']
                    model_input = th.cat([cur, model_kwargs['mask']], dim=1)
                else:
                    model_input = cur
                # NOTE(review): model_kwargs is deliberately not forwarded —
                # 'context'/'mask' are consumed by this loop, not the model;
                # confirm no other kwargs are expected by the model here.
                out = self.p_sample(
                    model,
                    model_input,
                    t,
                    clip_denoised=clip_denoised,
                    denoised_fn=denoised_fn,
                    cond_fn=cond_fn,
                    model_kwargs=None,
                )
                img = out["sample"]
                if use_inpaint:
                    img = model_kwargs['context'] + img * model_kwargs['mask']
                yield {"sample": img}
|
| 693 |
+
|
| 694 |
+
def ddim_sample(
|
| 695 |
+
self,
|
| 696 |
+
model,
|
| 697 |
+
x,
|
| 698 |
+
t, # index of current step
|
| 699 |
+
t_cpu=None,
|
| 700 |
+
t_prev=None, # index of step that we are going to compute, only used for heun
|
| 701 |
+
t_prev_cpu=None,
|
| 702 |
+
clip_denoised=True,
|
| 703 |
+
denoised_fn=None,
|
| 704 |
+
cond_fn=None,
|
| 705 |
+
model_kwargs=None,
|
| 706 |
+
eta=0.0,
|
| 707 |
+
sampling_steps=0,
|
| 708 |
+
):
|
| 709 |
+
"""
|
| 710 |
+
Sample x_{t-1} from the model using DDIM.
|
| 711 |
+
Same usage as p_sample().
|
| 712 |
+
"""
|
| 713 |
+
relerr = lambda x, y: (x-y).abs().sum() / y.abs().sum()
|
| 714 |
+
if cond_fn is not None:
|
| 715 |
+
out, saliency = self.condition_score2(cond_fn, out, x, t, model_kwargs=model_kwargs)
|
| 716 |
+
out = self.p_mean_variance(
|
| 717 |
+
model,
|
| 718 |
+
x,
|
| 719 |
+
t,
|
| 720 |
+
clip_denoised=clip_denoised,
|
| 721 |
+
denoised_fn=denoised_fn,
|
| 722 |
+
model_kwargs=model_kwargs,
|
| 723 |
+
)
|
| 724 |
+
eps_orig = self._predict_eps_from_xstart(x_t=x, t=t, pred_xstart=out["pred_xstart"])
|
| 725 |
+
if self.mode == 'default':
|
| 726 |
+
shape = x.shape
|
| 727 |
+
elif self.mode == 'segmentation':
|
| 728 |
+
shape = eps_orig.shape
|
| 729 |
+
else:
|
| 730 |
+
raise NotImplementedError(f'mode "{self.mode}" not implemented')
|
| 731 |
+
|
| 732 |
+
if not sampling_steps:
|
| 733 |
+
alpha_bar_orig = _extract_into_tensor(self.alphas_cumprod, t, shape)
|
| 734 |
+
alpha_bar_prev_orig = _extract_into_tensor(self.alphas_cumprod_prev, t, shape)
|
| 735 |
+
else:
|
| 736 |
+
xp = np.arange(0, 1000, 1, dtype=np.float)
|
| 737 |
+
alpha_cumprod_fun = interp1d(xp, self.alphas_cumprod,
|
| 738 |
+
bounds_error=False,
|
| 739 |
+
fill_value=(self.alphas_cumprod[0], self.alphas_cumprod[-1]),
|
| 740 |
+
)
|
| 741 |
+
alpha_bar_orig = alpha_cumprod_fun(t_cpu).item()
|
| 742 |
+
alpha_bar_prev_orig = alpha_cumprod_fun(t_prev_cpu).item()
|
| 743 |
+
sigma = (
|
| 744 |
+
eta
|
| 745 |
+
* ((1 - alpha_bar_prev_orig) / (1 - alpha_bar_orig))**.5
|
| 746 |
+
* (1 - alpha_bar_orig / alpha_bar_prev_orig)**.5
|
| 747 |
+
)
|
| 748 |
+
noise = th.randn(size=shape, device=x.device)
|
| 749 |
+
mean_pred = (
|
| 750 |
+
out["pred_xstart"] * alpha_bar_prev_orig**.5
|
| 751 |
+
+ (1 - alpha_bar_prev_orig - sigma ** 2)**.5 * eps_orig
|
| 752 |
+
)
|
| 753 |
+
nonzero_mask = (
|
| 754 |
+
(t != 0).float().view(-1, *([1] * (len(shape) - 1)))
|
| 755 |
+
)
|
| 756 |
+
sample = mean_pred + nonzero_mask * sigma * noise
|
| 757 |
+
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
def ddim_reverse_sample(
|
| 761 |
+
self,
|
| 762 |
+
model,
|
| 763 |
+
x,
|
| 764 |
+
t,
|
| 765 |
+
clip_denoised=True,
|
| 766 |
+
denoised_fn=None,
|
| 767 |
+
model_kwargs=None,
|
| 768 |
+
eta=0.0,
|
| 769 |
+
):
|
| 770 |
+
"""
|
| 771 |
+
Sample x_{t+1} from the model using DDIM reverse ODE.
|
| 772 |
+
"""
|
| 773 |
+
assert eta == 0.0, "Reverse ODE only for deterministic path"
|
| 774 |
+
out = self.p_mean_variance(
|
| 775 |
+
model,
|
| 776 |
+
x,
|
| 777 |
+
t,
|
| 778 |
+
clip_denoised=clip_denoised,
|
| 779 |
+
denoised_fn=denoised_fn,
|
| 780 |
+
model_kwargs=model_kwargs,
|
| 781 |
+
)
|
| 782 |
+
# Usually our model outputs epsilon, but we re-derive it
|
| 783 |
+
# in case we used x_start or x_prev prediction.
|
| 784 |
+
eps = (
|
| 785 |
+
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
|
| 786 |
+
- out["pred_xstart"]
|
| 787 |
+
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
|
| 788 |
+
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
|
| 789 |
+
|
| 790 |
+
# Equation 12. reversed
|
| 791 |
+
mean_pred = (
|
| 792 |
+
out["pred_xstart"] * th.sqrt(alpha_bar_next)
|
| 793 |
+
+ th.sqrt(1 - alpha_bar_next) * eps
|
| 794 |
+
)
|
| 795 |
+
|
| 796 |
+
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
|
| 797 |
+
|
| 798 |
+
|
| 799 |
+
|
| 800 |
+
    def ddim_sample_loop_interpolation(
        self,
        model,
        shape,
        img1,
        img2,
        lambdaint,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
    ):
        """
        Interpolate two images in noise space and denoise the blend with DDIM.

        Both images are noised to t = 199 with the *same* noise tensor, mixed
        as lambdaint * x1 + (1 - lambdaint) * x2, then denoised.

        :return: (final sample, the blended noisy input, img1, img2).
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        b = shape[0]
        # fixed interpolation timestep: randint(199, 200) is always 199
        t = th.randint(199,200, (b,), device=device).long().to(device)
        # NOTE(review): uses `torch` while the rest of the file uses the `th`
        # alias — verify `torch` is imported at module level.
        img1=torch.tensor(img1).to(device)
        img2 = torch.tensor(img2).to(device)
        noise = th.randn_like(img1).to(device)
        x_noisy1 = self.q_sample(x_start=img1, t=t, noise=noise).to(device)
        x_noisy2 = self.q_sample(x_start=img2, t=t, noise=noise).to(device)
        interpol=lambdaint*x_noisy1+(1-lambdaint)*x_noisy2
        # NOTE(review): time=t passes a tensor where downstream code calls
        # list(range(time)); this only works when b == 1.
        for sample in self.ddim_sample_loop_progressive(
            model,
            shape,
            time=t,
            noise=interpol,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            cond_fn=cond_fn,
            model_kwargs=model_kwargs,
            device=device,
            progress=progress,
        ):
            final = sample
        return final["sample"], interpol, img1, img2
|
| 840 |
+
|
| 841 |
+
def ddim_sample_loop(
|
| 842 |
+
self,
|
| 843 |
+
model,
|
| 844 |
+
shape,
|
| 845 |
+
noise=None,
|
| 846 |
+
clip_denoised=True,
|
| 847 |
+
denoised_fn=None,
|
| 848 |
+
cond_fn=None,
|
| 849 |
+
model_kwargs=None,
|
| 850 |
+
device=None,
|
| 851 |
+
progress=False,
|
| 852 |
+
eta=0.0,
|
| 853 |
+
sampling_steps=0,
|
| 854 |
+
):
|
| 855 |
+
"""
|
| 856 |
+
Generate samples from the model using DDIM.
|
| 857 |
+
|
| 858 |
+
Same usage as p_sample_loop().
|
| 859 |
+
"""
|
| 860 |
+
final = None
|
| 861 |
+
if device is None:
|
| 862 |
+
device = next(model.parameters()).device
|
| 863 |
+
assert isinstance(shape, (tuple, list))
|
| 864 |
+
b = shape[0]
|
| 865 |
+
#t = th.randint(0,1, (b,), device=device).long().to(device)
|
| 866 |
+
t = 1000
|
| 867 |
+
for sample in self.ddim_sample_loop_progressive(
|
| 868 |
+
model,
|
| 869 |
+
shape,
|
| 870 |
+
time=t,
|
| 871 |
+
noise=noise,
|
| 872 |
+
clip_denoised=clip_denoised,
|
| 873 |
+
denoised_fn=denoised_fn,
|
| 874 |
+
cond_fn=cond_fn,
|
| 875 |
+
model_kwargs=model_kwargs,
|
| 876 |
+
device=device,
|
| 877 |
+
progress=progress,
|
| 878 |
+
eta=eta,
|
| 879 |
+
sampling_steps=sampling_steps,
|
| 880 |
+
):
|
| 881 |
+
|
| 882 |
+
final = sample
|
| 883 |
+
return final["sample"]
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
    def ddim_sample_loop_known(
        self,
        model,
        shape,
        img,
        mode='default',
        org=None,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        noise_level=1000, # must be same as in training
        progress=False,
        conditioning=False,
        conditioner=None,
        classifier=None,
        eta=0.0,
        sampling_steps=0,
    ):
        """
        DDIM sampling conditioned on a known image (e.g. the image to be
        segmented).

        :param img: the conditioning image; it is concatenated to the evolving
            sample inside ddim_sample_loop_progressive (segmentation mode).
        :param mode: 'default' or 'segmentation'; both currently initialize
            noise and x_noisy to None.
        :return: (final sample, x_noisy (always None here), img).
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        b = shape[0]
        # NOTE(review): t is always 0 here and `indices` is never used below.
        t = th.randint(0,1, (b,), device=device).long().to(device)
        img = img.to(device)

        indices = list(range(t))[::-1]
        if mode == 'segmentation':
            noise = None
            x_noisy = None
        elif mode == 'default':
            noise = None
            x_noisy = None
        else:
            raise NotImplementedError(f'mode "{mode}" not implemented')

        final = None
        # pass images to be segmented as condition
        for sample in self.ddim_sample_loop_progressive(
            model,
            shape,
            segmentation_img=img, # image to be segmented
            time=noise_level,
            noise=x_noisy,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            cond_fn=cond_fn,
            model_kwargs=model_kwargs,
            device=device,
            progress=progress,
            eta=eta,
            sampling_steps=sampling_steps,
        ):
            final = sample

        return final["sample"], x_noisy, img
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
def ddim_sample_loop_progressive(
|
| 948 |
+
self,
|
| 949 |
+
model,
|
| 950 |
+
shape,
|
| 951 |
+
segmentation_img=None, # define to perform segmentation
|
| 952 |
+
time=1000,
|
| 953 |
+
noise=None,
|
| 954 |
+
clip_denoised=True,
|
| 955 |
+
denoised_fn=None,
|
| 956 |
+
cond_fn=None,
|
| 957 |
+
model_kwargs=None,
|
| 958 |
+
device=None,
|
| 959 |
+
progress=False,
|
| 960 |
+
eta=0.0,
|
| 961 |
+
sampling_steps=0,
|
| 962 |
+
):
|
| 963 |
+
"""
|
| 964 |
+
Use DDIM to sample from the model and yield intermediate samples from
|
| 965 |
+
each timestep of DDIM.
|
| 966 |
+
|
| 967 |
+
Same usage as p_sample_loop_progressive().
|
| 968 |
+
"""
|
| 969 |
+
if device is None:
|
| 970 |
+
device = next(model.parameters()).device
|
| 971 |
+
assert isinstance(shape, (tuple, list))
|
| 972 |
+
if noise is not None:
|
| 973 |
+
img = noise
|
| 974 |
+
else:
|
| 975 |
+
if segmentation_img is None: # normal sampling
|
| 976 |
+
img = th.randn(*shape, device=device)
|
| 977 |
+
else: # segmentation mode
|
| 978 |
+
label_shape = (segmentation_img.shape[0], model.out_channels, *segmentation_img.shape[2:])
|
| 979 |
+
img = th.randn(label_shape, dtype=segmentation_img.dtype, device=segmentation_img.device)
|
| 980 |
+
|
| 981 |
+
indices = list(range(time))[::-1] # klappt nur für batch_size == 1
|
| 982 |
+
|
| 983 |
+
|
| 984 |
+
if sampling_steps:
|
| 985 |
+
tmp = np.linspace(999, 0, sampling_steps)
|
| 986 |
+
tmp = np.append(tmp, -tmp[-2])
|
| 987 |
+
indices = tmp[:-1].round().astype(np.int)
|
| 988 |
+
indices_prev = tmp[1:].round().astype(np.int)
|
| 989 |
+
else:
|
| 990 |
+
indices_prev = [i-1 for i in indices]
|
| 991 |
+
|
| 992 |
+
if True: #progress:
|
| 993 |
+
# Lazy import so that we don't depend on tqdm.
|
| 994 |
+
from tqdm.auto import tqdm
|
| 995 |
+
|
| 996 |
+
indices = tqdm(indices)
|
| 997 |
+
|
| 998 |
+
for i, i_prev in zip(indices, indices_prev): # 1000 -> 0
|
| 999 |
+
if segmentation_img is not None:
|
| 1000 |
+
prev_img = img
|
| 1001 |
+
img = th.cat((segmentation_img, img), dim=1)
|
| 1002 |
+
t = th.tensor([i] * shape[0], device=device)
|
| 1003 |
+
t_prev = th.tensor([i_prev] * shape[0], device=device)
|
| 1004 |
+
with th.no_grad():
|
| 1005 |
+
out = self.ddim_sample(
|
| 1006 |
+
model,
|
| 1007 |
+
img,
|
| 1008 |
+
t,
|
| 1009 |
+
t_cpu=i,
|
| 1010 |
+
t_prev=t_prev,
|
| 1011 |
+
t_prev_cpu=i_prev,
|
| 1012 |
+
clip_denoised=clip_denoised,
|
| 1013 |
+
denoised_fn=denoised_fn,
|
| 1014 |
+
cond_fn=cond_fn,
|
| 1015 |
+
model_kwargs=model_kwargs,
|
| 1016 |
+
eta=eta,
|
| 1017 |
+
sampling_steps=sampling_steps,
|
| 1018 |
+
)
|
| 1019 |
+
yield out
|
| 1020 |
+
img = out["sample"]
|
| 1021 |
+
|
| 1022 |
+
    def _vb_terms_bpd(
        self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
    ):
        """
        Get a term for the variational lower-bound.

        The resulting units are bits (rather than nats, as one might expect).
        This allows for comparison to other papers.

        :return: a dict with the following keys:
                 - 'output': a shape [N] tensor of NLLs or KLs.
                 - 'pred_xstart': the x_0 predictions.
        """
        # KL between the true posterior q(x_{t-1}|x_t,x_0) and the model's
        # reverse distribution p(x_{t-1}|x_t)
        true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
            x_start=x_start, x_t=x_t, t=t
        )
        out = self.p_mean_variance(
            model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
        )
        kl = normal_kl(
            true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
        )
        kl = mean_flat(kl) / np.log(2.0)  # nats -> bits

        decoder_nll = -discretized_gaussian_log_likelihood(
            x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
        )
        assert decoder_nll.shape == x_start.shape
        decoder_nll = mean_flat(decoder_nll) / np.log(2.0)

        # At the first timestep return the decoder NLL,
        # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
        output = th.where((t == 0), decoder_nll, kl)
        return {"output": output, "pred_xstart": out["pred_xstart"]}
|
| 1056 |
+
|
| 1057 |
+
def training_losses(self, model, x_start, t, classifier=None, model_kwargs=None, noise=None, labels=None,
                    mode='default'):
    """
    Compute training losses for a single timestep.
    :param model: the model to evaluate loss on.
    :param x_start: the [N x C x ...] tensor of inputs - original image resolution.
    :param t: a batch of timestep indices.
    :param classifier: unused in this method; kept for interface compatibility.
    :param model_kwargs: if not None, a dict of extra keyword arguments to
                         pass to the model. This can be used for conditioning.
                         In 'inpaint' mode it must contain a 'mask' entry.
    :param noise: if specified, the specific Gaussian noise to try to remove.
                  NOTE(review): both branches below overwrite this with fresh
                  noise, so a caller-supplied value is currently ignored.
    :param labels: must be specified for mode='segmentation'
    :param mode: can be 'default' (image generation) or 'inpaint'
    :return: a tuple (terms, model_output, model_output_idwt); ``terms`` is a
             dict with key "mse_wav" (MSE in wavelet space, averaged over the
             batch), ``model_output`` the predicted wavelet subbands and
             ``model_output_idwt`` their inverse wavelet reconstruction.
    """
    if model_kwargs is None:
        model_kwargs = {}

    # Wavelet transform the input image. The low-pass band is scaled by 1/3;
    # the inverse transform below multiplies it back by 3.
    LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(x_start)
    x_start_dwt = th.cat([LLL / 3., LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1)

    if mode == 'default':
        # Fresh Gaussian noise, diffused in wavelet space.
        noise = th.randn_like(x_start)
        LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(noise)
        noise_dwt = th.cat([LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1)
        x_t = self.q_sample(x_start_dwt, t, noise=noise_dwt)

    elif mode == 'inpaint':
        mask = model_kwargs.get('mask')
        if mask is None:
            raise ValueError('mask must be provided for inpaint mode')
        # remove mask so it is not forwarded to the model
        model_kwargs = {k: v for k, v in model_kwargs.items() if k != 'mask'}

        LLLm, LLHm, LHLm, LHHm, HLLm, HLHm, HHLm, HHHm = dwt(mask)
        mask_dwt = th.cat([LLLm, LLHm, LHLm, LHHm, HLLm, HLHm, HHLm, HHHm], dim=1)
        # NOTE(review): assumes the mask is single-channel so that repeating
        # by x_start.shape[1] matches the 8*C channels of x_start_dwt — TODO
        # confirm against the caller/dataloader.
        mask_rep = mask_dwt.repeat(1, x_start.shape[1], 1, 1, 1)

        noise = th.randn_like(x_start)
        LLLn, LLHn, LHLn, LHHn, HLLn, HLHn, HHLn, HHHn = dwt(noise)
        noise_dwt = th.cat([LLLn, LLHn, LHLn, LHHn, HLLn, HLHn, HHLn, HHHn], dim=1)
        x_t_noisy = self.q_sample(x_start_dwt, t, noise=noise_dwt)
        # Keep the known context outside the mask; diffuse only inside it.
        x_ctx = x_start_dwt * (1 - mask_rep)
        x_t = x_ctx + x_t_noisy * mask_rep
    else:
        raise ValueError(f'Invalid mode {mode=}, needs to be "default" or "inpaint"')

    if mode == 'inpaint':
        # The (wavelet-transformed) mask is fed as extra input channels.
        model_in = th.cat([x_t, mask_rep], dim=1)
    else:
        model_in = x_t
    model_output = model(model_in, self._scale_timesteps(t), **model_kwargs)  # Model outputs denoised wavelet subbands

    # Inverse wavelet transform the model output (undo the 1/3 scaling of the
    # low-pass band applied above).
    B, _, H, W, D = model_output.size()
    model_output_idwt = idwt(model_output[:, 0, :, :, :].view(B, 1, H, W, D) * 3.,
                             model_output[:, 1, :, :, :].view(B, 1, H, W, D),
                             model_output[:, 2, :, :, :].view(B, 1, H, W, D),
                             model_output[:, 3, :, :, :].view(B, 1, H, W, D),
                             model_output[:, 4, :, :, :].view(B, 1, H, W, D),
                             model_output[:, 5, :, :, :].view(B, 1, H, W, D),
                             model_output[:, 6, :, :, :].view(B, 1, H, W, D),
                             model_output[:, 7, :, :, :].view(B, 1, H, W, D))

    if mode == 'inpaint':
        # Penalize only the masked region; normalize by its fractional size
        # (epsilon guards against an all-zero mask).
        diff = (x_start_dwt - model_output) * mask_rep
        mse = mean_flat(diff ** 2) / (mean_flat(mask_rep) + 1e-8)
        terms = {"mse_wav": th.mean(mse, dim=0)}
    else:
        terms = {"mse_wav": th.mean(mean_flat((x_start_dwt - model_output) ** 2), dim=0)}

    return terms, model_output, model_output_idwt
|
| 1130 |
+
|
| 1131 |
+
|
| 1132 |
+
def _prior_bpd(self, x_start):
    """
    Get the prior KL term for the variational lower-bound, measured in
    bits-per-dim.

    This term can't be optimized, as it only depends on the encoder.

    :param x_start: the [N x C x ...] tensor of inputs.
    :return: a batch of [N] KL values (in bits), one per batch element.
    """
    n = x_start.shape[0]
    # q(x_T | x_0) at the final diffusion step, for every batch element.
    last_step = th.tensor([self.num_timesteps - 1] * n, device=x_start.device)
    mean_T, _, logvar_T = self.q_mean_variance(x_start, last_step)
    # KL against the standard-normal prior, converted from nats to bits.
    prior_kl = normal_kl(
        mean1=mean_T, logvar1=logvar_T, mean2=0.0, logvar2=0.0
    )
    return mean_flat(prior_kl) / np.log(2.0)
|
| 1149 |
+
|
| 1150 |
+
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
    """
    Compute the entire variational lower-bound, measured in bits-per-dim,
    as well as other related quantities.

    :param model: the model to evaluate loss on.
    :param x_start: the [N x C x ...] tensor of inputs.
    :param clip_denoised: if True, clip denoised samples.
    :param model_kwargs: if not None, a dict of extra keyword arguments to
        pass to the model. This can be used for conditioning.

    :return: a dict containing the following keys:
             - total_bpd: the total variational lower-bound, per batch element.
             - prior_bpd: the prior term in the lower-bound.
             - vb: an [N x T] tensor of terms in the lower-bound.
             - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
             - mse: an [N x T] tensor of epsilon MSEs for each timestep.
    """
    device = x_start.device
    batch_size = x_start.shape[0]

    vb = []
    xstart_mse = []
    mse = []
    # Walk the diffusion chain backwards, from t = T-1 down to t = 0.
    for t in list(range(self.num_timesteps))[::-1]:
        t_batch = th.tensor([t] * batch_size, device=device)
        noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)

        # Calculate VLB term at the current timestep.
        # BUGFIX: the call was garbled as "_vb_terms_bptimestepsd", which
        # raised AttributeError; the method defined above is _vb_terms_bpd.
        with th.no_grad():
            out = self._vb_terms_bpd(
                model,
                x_start=x_start,
                x_t=x_t,
                t=t_batch,
                clip_denoised=clip_denoised,
                model_kwargs=model_kwargs,
            )
        vb.append(out["output"])
        xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
        eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
        mse.append(mean_flat((eps - noise) ** 2))

    # Stack per-timestep terms into [N x T] tensors (column 0 == t = T-1).
    vb = th.stack(vb, dim=1)
    xstart_mse = th.stack(xstart_mse, dim=1)
    mse = th.stack(mse, dim=1)

    prior_bpd = self._prior_bpd(x_start)
    total_bpd = vb.sum(dim=1) + prior_bpd
    return {
        "total_bpd": total_bpd,
        "prior_bpd": prior_bpd,
        "vb": vb,
        "xstart_mse": xstart_mse,
        "mse": mse,
    }
|
| 1207 |
+
|
| 1208 |
+
|
| 1209 |
+
def _extract_into_tensor(arr, timesteps, broadcast_shape):
|
| 1210 |
+
"""
|
| 1211 |
+
Extract values from a 1-D numpy array for a batch of indices.
|
| 1212 |
+
|
| 1213 |
+
:param arr: the 1-D numpy array.
|
| 1214 |
+
:param timesteps: a tensor of indices into the array to extract.
|
| 1215 |
+
:param broadcast_shape: a larger shape of K dimensions with the batch
|
| 1216 |
+
dimension equal to the length of timesteps.
|
| 1217 |
+
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
|
| 1218 |
+
"""
|
| 1219 |
+
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
|
| 1220 |
+
while len(res.shape) < len(broadcast_shape):
|
| 1221 |
+
res = res[..., None]
|
| 1222 |
+
return res.expand(broadcast_shape)
|
guided_diffusion/inpaintloader.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import nibabel as nib
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
from torch.utils.data import Dataset
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class InpaintVolumes(Dataset):
    """Dataset returning MRI volumes and inpainting masks.

    Each item is ``(Y, M, Y_void, name, affine)``: the stacked modality
    volume, the binary inpainting mask, the volume with the masked region
    zeroed, the participant id, and the NIfTI affine of the first modality.
    """

    def __init__(
        self,
        root_dir: str,
        subset: str = "train",
        img_size: int = 256,
        modalities: tuple = ("T1w",),
        normalize=None,
    ):
        super().__init__()
        self.root_dir = os.path.expanduser(root_dir)
        self.subset = subset
        self.img_size = img_size
        self.modalities = modalities
        # Identity transform when no normalization callable is supplied.
        self.normalize = normalize or (lambda x: x)
        self.cases = self._index_cases()

    # ------------------------------------------------------------
    def _index_cases(self):
        """Collect file paths for all cases of the requested subset."""
        df = pd.read_csv(f"{self.root_dir}/participants.tsv", sep="\t")

        # Assign a reproducible 90/10 train/val split for FCD subjects.
        # BUGFIX: the previous implementation shuffled with
        # reset_index(drop=True), so ``df.loc[fcd_df.index, ...]`` wrote the
        # split labels to rows 0..m-1 of ``df`` (whatever group they were in)
        # instead of the FCD rows. Keep the original index throughout.
        fcd_idx = df.index[df["group"] == "fcd"].to_series()
        shuffled = fcd_idx.sample(frac=1, random_state=42)
        n_train = int(len(shuffled) * 0.9)
        df.loc[shuffled.iloc[:n_train], "split"] = "train"
        df.loc[shuffled.iloc[n_train:], "split"] = "val"

        cases = []
        for pid in df[(df["split"] == self.subset) & (df["group"] == "fcd")][
            "participant_id"
        ]:
            case_dir = os.path.join(self.root_dir, pid, "anat")
            files = os.listdir(case_dir)
            img_dict = {}
            for mod in self.modalities:
                # e.g. sub-001..._T1w.nii.gz
                pattern = re.compile(rf"^{re.escape(pid)}.*{re.escape(mod)}\.nii\.gz$")
                matches = [f for f in files if pattern.match(f)]
                if not matches:
                    raise FileNotFoundError(f"Missing {mod} for {pid} in {case_dir}")
                img_dict[mod] = os.path.join(case_dir, matches[0])

            mask_matches = [
                f for f in files if re.match(rf"^{re.escape(pid)}.*roi\.nii\.gz$", f)
            ]
            if not mask_matches:
                raise FileNotFoundError(f"Missing mask for {pid} in {case_dir}")
            mask_path = os.path.join(case_dir, mask_matches[0])
            cases.append({"img": img_dict, "mask": mask_path, "name": pid})
        return cases

    # ------------------------------------------------------------
    def _pad_to_cube(self, vol, fill=0.0):
        """Symmetric 3-D pad of the trailing dims to [img_size^3]."""
        D, H, W = vol.shape[-3:]
        pad_D, pad_H, pad_W = (
            self.img_size - D,
            self.img_size - H,
            self.img_size - W,
        )
        # torch pad order: last dim first (W, H, D), each as (before, after).
        pad = (
            pad_W // 2,
            pad_W - pad_W // 2,
            pad_H // 2,
            pad_H - pad_H // 2,
            pad_D // 2,
            pad_D - pad_D // 2,
        )
        return nn.functional.pad(vol, pad, value=fill)

    # ------------------------------------------------------------
    def __getitem__(self, idx):
        """Load, clip, rescale and pad one case; returns (Y, M, Y_void, name, affine)."""
        rec = self.cases[idx]
        name = rec["name"]

        vols = []
        for mod in self.modalities:
            arr = (
                nib.load(rec["img"][mod]).get_fdata().astype(np.float32)
            )
            # Robust intensity rescaling to ~[0, 1] via 0.1/99.9 percentiles.
            lo, hi = np.quantile(arr, [0.001, 0.999])
            arr = np.clip(arr, lo, hi)
            arr = (arr - lo) / (hi - lo + 1e-6)
            vols.append(torch.from_numpy(arr))
        first_mod = self.modalities[0]
        affine = nib.load(rec["img"][first_mod]).affine
        Y = torch.stack(vols, dim=0)

        mask_arr = nib.load(rec["mask"]).get_fdata().astype(np.uint8)
        M = torch.from_numpy(mask_arr).unsqueeze(0)
        M = (M > 0).to(Y.dtype)  # binarize

        Y = self._pad_to_cube(Y, fill=0.0)
        M = self._pad_to_cube(M, fill=0.0)
        if self.img_size == 128:
            # Volumes are padded to 128^3 already; this extra pooling halves
            # them again — NOTE(review): confirm the intended target size.
            pool = nn.AvgPool3d(2, 2)
            Y = pool(Y)
            M = pool(M)

        # Context volume with the masked region zeroed out.
        Y_void = Y * (1 - M)
        return Y, M, Y_void, name, affine

    # ------------------------------------------------------------
    def __len__(self):
        return len(self.cases)
|
guided_diffusion/lidcloader.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.utils.data
|
| 4 |
+
import os
|
| 5 |
+
import os.path
|
| 6 |
+
import nibabel
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class LIDCVolumes(torch.utils.data.Dataset):
    """Dataset of LIDC volumes stored as NIfTI files.

    In 'fake' mode every file under ``directory`` is indexed and each item is
    ``(image, filename)``; in any other mode only leaf directories are
    scanned and each item is the (optionally downsampled) image tensor.
    """

    def __init__(self, directory, test_flag=False, normalize=None, mode='train', img_size=256):
        '''
        directory is expected to contain some folder structure:
        if some subfolder contains only files, all of these
        files are assumed to have the name: processed.nii.gz
        '''
        super().__init__()
        self.mode = mode
        self.directory = os.path.expanduser(directory)
        self.normalize = normalize or (lambda x: x)
        self.test_flag = test_flag
        self.img_size = img_size
        self.database = []

        if self.mode == 'fake':
            # Index every file, regardless of directory depth.
            for root, dirs, files in os.walk(self.directory):
                for fname in files:
                    self.database.append({'image': os.path.join(root, fname)})
        else:
            # A directory without subdirectories is one datapoint; when it
            # holds several files, only the last (sorted) one is kept.
            for root, dirs, files in os.walk(self.directory):
                if dirs:
                    continue
                entry = {}
                for fname in sorted(files):
                    entry['image'] = os.path.join(root, fname)
                if entry:
                    self.database.append(entry)

    def __getitem__(self, x):
        path = self.database[x]['image']
        volume = nibabel.load(path).get_fdata()

        if self.mode == 'fake':
            image = torch.tensor(volume, dtype=torch.float32)
            image = image.unsqueeze(dim=0)
        else:
            # Copy into a fixed 256^3 canvas — assumes the stored volume is
            # already 256^3 (TODO confirm with the preprocessing script).
            image = torch.zeros(1, 256, 256, 256)
            image[:, :, :, :] = torch.Tensor(volume)
            if self.img_size == 128:
                image = nn.AvgPool3d(kernel_size=2, stride=2)(image)

        # normalization
        image = self.normalize(image)

        if self.mode == 'fake':
            return image, path
        return image

    def __len__(self):
        return len(self.database)
|
guided_diffusion/logger.py
ADDED
|
@@ -0,0 +1,495 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
|
| 3 |
+
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import shutil
|
| 9 |
+
import os.path as osp
|
| 10 |
+
import json
|
| 11 |
+
import time
|
| 12 |
+
import datetime
|
| 13 |
+
import tempfile
|
| 14 |
+
import warnings
|
| 15 |
+
from collections import defaultdict
|
| 16 |
+
from contextlib import contextmanager
|
| 17 |
+
|
| 18 |
+
# Numeric logging levels (same ordering as Python's ``logging`` module).
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40

# Setting the logger to this level silences all output.
DISABLED = 50
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class KVWriter(object):
    """Interface for writers that consume a dict of key/value diagnostics."""

    def writekvs(self, kvs):
        # Subclasses must implement; ``kvs`` maps key -> value for one dump.
        raise NotImplementedError
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class SeqWriter(object):
    """Interface for writers that consume a sequence of message strings."""

    def writeseq(self, seq):
        # Subclasses must implement; ``seq`` is an iterable of strings.
        raise NotImplementedError
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class HumanOutputFormat(KVWriter, SeqWriter):
    """Write key/value tables and plain message sequences to a text stream."""

    def __init__(self, filename_or_file):
        """
        :param filename_or_file: a path (the file is opened for writing and
            owned/closed by this object) or an already-open writable
            file-like object (not closed by this object).
        """
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, "wt")
            self.own_file = True
        else:
            # BUGFIX: this sink is only ever written to, so validate the
            # ``write`` capability instead of ``read`` (the old check
            # rejected legitimate write-only streams).
            assert hasattr(filename_or_file, "write"), (
                "expected file or str, got %s" % filename_or_file
            )
            self.file = filename_or_file
            self.own_file = False

    def writekvs(self, kvs):
        # Create strings for printing
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            if hasattr(val, "__float__"):
                valstr = "%-8.3g" % val
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)

        # Find max widths
        if len(key2str) == 0:
            print("WARNING: tried to write empty key-value dict")
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))

        # Write out the data as an ASCII table bounded by dashes.
        dashes = "-" * (keywidth + valwidth + 7)
        lines = [dashes]
        for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
            lines.append(
                "| %s%s | %s%s |"
                % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
            )
        lines.append(dashes)
        self.file.write("\n".join(lines) + "\n")

        # Flush the output to the file
        self.file.flush()

    def _truncate(self, s):
        # Keep table cells readable by capping them at 30 characters.
        maxlen = 30
        return s[: maxlen - 3] + "..." if len(s) > maxlen else s

    def writeseq(self, seq):
        seq = list(seq)
        for (i, elem) in enumerate(seq):
            self.file.write(elem)
            if i < len(seq) - 1:  # add space unless this is the last one
                self.file.write(" ")
        self.file.write("\n")
        self.file.flush()

    def close(self):
        # Only close streams we opened ourselves (never e.g. sys.stdout).
        if self.own_file:
            self.file.close()
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class JSONOutputFormat(KVWriter):
    """Write one JSON object per dump, one per line, to ``filename``."""

    def __init__(self, filename):
        self.file = open(filename, "wt")

    def writekvs(self, kvs):
        # BUGFIX: build a serializable copy instead of mutating the caller's
        # dict in place — Logger.dumpkvs forwards the same dict to every
        # registered writer.
        serializable = {}
        for k, v in sorted(kvs.items()):
            if hasattr(v, "dtype"):
                v = float(v)  # numpy scalar -> plain float for json
            serializable[k] = v
        self.file.write(json.dumps(serializable) + "\n")
        self.file.flush()

    def close(self):
        self.file.close()
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class CSVOutputFormat(KVWriter):
    """Append key/value rows to a CSV file, rewriting the header (and padding
    old rows) whenever new keys appear mid-run."""

    def __init__(self, filename):
        # "w+t": read/write text mode — reading back is needed to re-emit
        # previous rows when the column set grows.
        self.file = open(filename, "w+t")
        self.keys = []
        self.sep = ","

    def writekvs(self, kvs):
        # Add our current row to the history
        extra_keys = list(kvs.keys() - self.keys)
        extra_keys.sort()
        if extra_keys:
            # New columns: rewrite the whole file with the extended header
            # and pad each existing row with empty trailing cells.
            self.keys.extend(extra_keys)
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for (i, k) in enumerate(self.keys):
                if i > 0:
                    self.file.write(",")
                self.file.write(k)
            self.file.write("\n")
            for line in lines[1:]:
                self.file.write(line[:-1])
                self.file.write(self.sep * len(extra_keys))
                self.file.write("\n")
        # Write the current row in header order; missing keys become empty.
        for (i, k) in enumerate(self.keys):
            if i > 0:
                self.file.write(",")
            v = kvs.get(k)
            if v is not None:
                self.file.write(str(v))
        self.file.write("\n")
        self.file.flush()

    def close(self):
        self.file.close()
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class TensorBoardOutputFormat(KVWriter):
    """
    Dumps key/value pairs into TensorBoard's numeric format.
    """

    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        self.step = 1
        prefix = "events"
        path = osp.join(osp.abspath(dir), prefix)
        # Imported lazily so tensorflow is only required when this output
        # format is actually used. NOTE(review): these are TF1-era internal
        # modules; verify they exist in the installed tensorflow version.
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat

        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))

    def writekvs(self, kvs):
        # Wrap every value as a scalar summary and emit one event per dump.
        def summary_val(k, v):
            kwargs = {"tag": k, "simple_value": float(v)}
            return self.tf.Summary.Value(**kwargs)

        summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = (
            self.step
        )  # is there any reason why you'd want to specify the step?
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1

    def close(self):
        # Idempotent: the writer is dropped after the first close.
        if self.writer:
            self.writer.Close()
            self.writer = None
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def make_output_format(format, ev_dir, log_suffix=""):
    """Create the writer for ``format`` ("stdout", "log", "json", "csv",
    "tensorboard"), creating ``ev_dir`` if needed.

    :raises ValueError: for an unrecognized format name.
    """
    os.makedirs(ev_dir, exist_ok=True)
    builders = {
        "stdout": lambda: HumanOutputFormat(sys.stdout),
        "log": lambda: HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix)),
        "json": lambda: JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix)),
        "csv": lambda: CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix)),
        "tensorboard": lambda: TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix)),
    }
    try:
        factory = builders[format]
    except KeyError:
        raise ValueError("Unknown format specified: %s" % (format,))
    return factory()
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
# ================================================================
|
| 208 |
+
# API
|
| 209 |
+
# ================================================================
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    If called many times, last value will be used.
    """
    get_current().logkv(key, val)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def logkv_mean(key, val):
    """
    The same as logkv(), but if called many times, values averaged.
    """
    get_current().logkv_mean(key, val)
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def logkvs(d):
    """
    Log a dictionary of key-value pairs (last write wins per key).
    """
    for (k, v) in d.items():
        logkv(k, v)
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def dumpkvs():
    """
    Write all of the diagnostics from the current iteration.

    Returns the dict of values that was written (useful for tests).
    """
    return get_current().dumpkvs()
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def getkvs():
    """Return the current iteration's accumulated key/value dict."""
    return get_current().name2val
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
    """
    get_current().log(*args, level=level)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def debug(*args):
    """Log at DEBUG level."""
    log(*args, level=DEBUG)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def info(*args):
    """Log at INFO level."""
    log(*args, level=INFO)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def warn(*args):
    """Log at WARN level."""
    log(*args, level=WARN)
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def error(*args):
    """Log at ERROR level."""
    log(*args, level=ERROR)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def set_level(level):
    """
    Set logging threshold on current logger.
    """
    get_current().set_level(level)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def set_comm(comm):
    """Set the MPI communicator used to average values across ranks."""
    get_current().set_comm(comm)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def get_dir():
    """
    Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call start)
    """
    return get_current().get_dir()
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
# Backwards-compatible aliases for the OpenAI baselines API.
record_tabular = logkv
dump_tabular = dumpkvs
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
@contextmanager
def profile_kv(scopename):
    """Accumulate wall-clock time spent inside this scope under the key
    ``wait_<scopename>`` of the current logger."""
    logkey = "wait_" + scopename
    tstart = time.time()
    try:
        yield
    finally:
        # Recorded even if the body raises.
        get_current().name2val[logkey] += time.time() - tstart
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def profile(n):
    """
    Decorator that times every call of the wrapped function via profile_kv.

    Usage:
    @profile("my_func")
    def my_func(): code
    """

    def decorator_with_name(func):
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)

        return func_wrapper

    return decorator_with_name
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
# ================================================================
|
| 321 |
+
# Backend
|
| 322 |
+
# ================================================================
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def get_current():
    """Return the active Logger, lazily creating the default on first use."""
    if Logger.CURRENT is None:
        _configure_default_logger()

    return Logger.CURRENT
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
class Logger(object):
    """Core logger: accumulates key/value pairs for the current iteration and
    dispatches them (and free-form messages) to the configured writers."""

    DEFAULT = None  # A logger with no output files. (See right below class definition)
    # So that you can still log to the terminal without setting up any output files
    CURRENT = None  # Current logger being used by the free functions above

    def __init__(self, dir, output_formats, comm=None):
        # dir: output directory (may be None); output_formats: list of
        # KVWriter/SeqWriter instances; comm: optional MPI communicator used
        # by dumpkvs() to average values across ranks.
        self.name2val = defaultdict(float)  # values this iteration
        self.name2cnt = defaultdict(int)  # sample counts for logkv_mean
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
        self.comm = comm

    # Logging API, forwarded
    # ----------------------------------------
    def logkv(self, key, val):
        # Record the latest value for ``key`` (last write wins).
        self.name2val[key] = val

    def logkv_mean(self, key, val):
        # Incrementally maintain the running mean of ``key``.
        oldval, cnt = self.name2val[key], self.name2cnt[key]
        self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
        self.name2cnt[key] = cnt + 1

    def dumpkvs(self):
        # Flush accumulated values to every KVWriter, averaging across MPI
        # ranks first when a communicator is configured.
        if self.comm is None:
            d = self.name2val
        else:
            d = mpi_weighted_mean(
                self.comm,
                {
                    name: (val, self.name2cnt.get(name, 1))
                    for (name, val) in self.name2val.items()
                },
            )
            if self.comm.rank != 0:
                d["dummy"] = 1  # so we don't get a warning about empty dict
        out = d.copy()  # Return the dict for unit testing purposes
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(d)
        self.name2val.clear()
        self.name2cnt.clear()
        return out

    def log(self, *args, level=INFO):
        # Emit only when the message's level meets the current threshold.
        if self.level <= level:
            self._do_log(args)

    # Configuration
    # ----------------------------------------
    def set_level(self, level):
        self.level = level

    def set_comm(self, comm):
        self.comm = comm

    def get_dir(self):
        return self.dir

    def close(self):
        for fmt in self.output_formats:
            fmt.close()

    # Misc
    # ----------------------------------------
    def _do_log(self, args):
        # Forward a message to every sequence writer (stdout, log file, ...).
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def get_rank_without_mpi_import():
    """Return this process's MPI rank, or 0 when not running under MPI.

    Checks environment variables here instead of importing mpi4py, to avoid
    calling MPI_Init() when this module is imported.
    """
    for varname in ("PMI_RANK", "OMPI_COMM_WORLD_RANK"):
        value = os.environ.get(varname)
        if value is not None:
            return int(value)
    return 0
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def mpi_weighted_mean(comm, local_name2valcount):
    """
    Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
    Perform a weighted average over dicts that are each on a different node
    Input: local_name2valcount: dict mapping key -> (value, count)
    Returns: key -> mean
    """
    gathered = comm.gather(local_name2valcount)
    if comm.rank != 0:
        # Non-root ranks contribute data but receive no result.
        return {}
    name2sum = defaultdict(float)
    name2count = defaultdict(float)
    for node_dict in gathered:
        for name, (val, count) in node_dict.items():
            try:
                val = float(val)
            except ValueError:
                warnings.warn(
                    "WARNING: tried to compute mean on non-float {}={}".format(
                        name, val
                    )
                )
            else:
                name2sum[name] += val * count
                name2count[name] += count
    return {name: name2sum[name] / name2count[name] for name in name2sum}
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
def configure(dir='./results', format_strs=None, comm=None, log_suffix=""):
    """
    Install a new Logger.CURRENT writing to ``dir``.

    If comm is provided, average all numerical stats across that comm.
    Falls back to $OPENAI_LOGDIR, then a timestamped temp directory, when
    ``dir`` is None.
    """
    if dir is None:
        dir = os.getenv("OPENAI_LOGDIR")
    if dir is None:
        dir = osp.join(
            tempfile.gettempdir(),
            datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
        )
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)

    rank = get_rank_without_mpi_import()
    if rank > 0:
        # Give every non-root rank its own suffixed log files.
        log_suffix = log_suffix + "-rank%03i" % rank

    if format_strs is None:
        if rank == 0:
            format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
        else:
            format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
    format_strs = filter(None, format_strs)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]

    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log("Logging to %s" % dir)
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
def _configure_default_logger():
    # Configure a logger with default settings and remember it so reset()
    # can restore it later.
    configure()
    Logger.DEFAULT = Logger.CURRENT
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
def reset():
    """Close the current logger (if customized) and restore the default one."""
    if Logger.CURRENT is Logger.DEFAULT:
        return
    Logger.CURRENT.close()
    Logger.CURRENT = Logger.DEFAULT
    log("Reset logger")
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    """Temporarily install a fresh logger; restore the previous one on exit."""
    saved_logger = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        yield
    finally:
        Logger.CURRENT.close()
        Logger.CURRENT = saved_logger
|
| 495 |
+
|
guided_diffusion/losses.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Helpers for various likelihood-based losses. These are ported from the original
|
| 3 |
+
Ho et al. diffusion models codebase:
|
| 4 |
+
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
import torch as th
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    reference = next(
        (obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, th.Tensor)),
        None,
    )
    assert reference is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for th.exp().
    logvar1, logvar2 = (
        x if isinstance(x, th.Tensor) else th.tensor(x).to(reference)
        for x in (logvar1, logvar2)
    )

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def approx_standard_normal_cdf(x):
    """
    A fast approximation of the cumulative distribution function of the
    standard normal (tanh-based approximation).
    """
    inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))
    return 0.5 * (1.0 + th.tanh(inner))
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint8 values,
              rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    # Half-width of one uint8 bin after rescaling to [-1, 1].
    half_bin = 1.0 / 255.0
    centered = x - means
    inv_stdv = th.exp(-log_scales)
    cdf_plus = approx_standard_normal_cdf(inv_stdv * (centered + half_bin))
    cdf_min = approx_standard_normal_cdf(inv_stdv * (centered - half_bin))
    log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
    log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
    log_cdf_delta = th.log((cdf_plus - cdf_min).clamp(min=1e-12))
    # Edge pixels integrate the full tail instead of a single bin.
    log_probs = th.where(
        x < -0.999,
        log_cdf_plus,
        th.where(x > 0.999, log_one_minus_cdf_min, log_cdf_delta),
    )
    assert log_probs.shape == x.shape
    return log_probs
|
guided_diffusion/nn.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Various utilities for neural networks.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
|
| 7 |
+
import torch as th
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
|
| 12 |
+
class SiLU(nn.Module):
    """Sigmoid-weighted linear unit (swish): ``x * sigmoid(x)``."""

    def forward(self, x):
        return th.sigmoid(x) * x
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class GroupNorm32(nn.GroupNorm):
    """GroupNorm computed in float32, with the result cast back to the input
    dtype (keeps normalization numerically stable under mixed precision)."""

    def forward(self, x):
        normalized = super().forward(x.float())
        return normalized.type(x.dtype)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    conv_classes = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in conv_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_classes[dims](*args, **kwargs)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def linear(*args, **kwargs):
    """
    Create a linear module.

    Thin wrapper around ``nn.Linear``, kept for symmetry with ``conv_nd``.
    """
    return nn.Linear(*args, **kwargs)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    pool_classes = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims not in pool_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_classes[dims](*args, **kwargs)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def update_ema(target_params, source_params, rate=0.99):
    """
    Update target parameters to be closer to those of source parameters using
    an exponential moving average.

    :param target_params: the target parameter sequence.
    :param source_params: the source parameter sequence.
    :param rate: the EMA rate (closer to 1 means slower).
    """
    blend = 1 - rate
    for target, source in zip(target_params, source_params):
        # In place: target = rate * target + (1 - rate) * source
        target.detach().mul_(rate).add_(source, alpha=blend)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def zero_module(module):
    """
    Zero out the parameters of a module (in place) and return it.
    """
    with th.no_grad():
        for param in module.parameters():
            param.zero_()
    return module
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def scale_module(module, scale):
    """
    Scale the parameters of a module (in place) and return it.
    """
    with th.no_grad():
        for param in module.parameters():
            param.mul_(scale)
    return module
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def mean_flat(tensor):
    """
    Take the mean over all trailing dimensions starting at dim 2.

    Note: despite the name, dims 0 and 1 are both preserved — the reduction
    starts at dim 2, not dim 1.
    """
    trailing_dims = list(range(2, len(tensor.shape)))
    return tensor.mean(dim=trailing_dims)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def normalization(channels, groups=32):
    """
    Make a standard normalization layer.

    :param channels: number of input channels.
    :param groups: number of GroupNorm groups.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(num_groups=groups, num_channels=channels)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric sequence of frequencies from 1 down to ~1/max_period.
    exponents = th.arange(start=0, end=half, dtype=th.float32) / half
    freqs = th.exp(-math.log(max_period) * exponents).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = th.cat([th.cos(angles), th.sin(angles)], dim=-1)
    if dim % 2:
        # Odd dim: pad with a zero column so the output width is exactly dim.
        embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.

    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *args)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class CheckpointFunction(th.autograd.Function):
    """Autograd Function implementing gradient checkpointing: the forward
    pass runs without tracking gradients, and the backward pass re-runs the
    function with gradients enabled to recompute activations on demand."""

    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        # First `length` args are the real inputs; the rest are parameters the
        # function depends on, stashed so autograd can produce grads for them.
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with th.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # Recompute with grad enabled on detached leaves of the saved inputs.
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with th.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = th.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        # Drop references promptly to free the recomputed graph.
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # None, None correspond to the run_function and length arguments.
        return (None, None) + input_grads
|
guided_diffusion/pretrain_checks.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import torch as th
|
| 4 |
+
|
| 5 |
+
from . import dist_util, logger
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def run_pretrain_checks(args, dataloader, model, diffusion, schedule_sampler):
    """Run a set of quick checks before starting training.

    Saves the hyperparameters and an example batch to the log directory, then
    performs one forward/backward pass on a single example, raising
    RuntimeError if the loss or any gradient is non-finite.
    """
    logdir = logger.get_dir() or os.getcwd()
    os.makedirs(logdir, exist_ok=True)

    # Save hyperparameters
    with open(os.path.join(logdir, "hyperparameters.json"), "w") as f:
        json.dump(vars(args), f, indent=2)

    # Fetch one batch from dataloader and save example inputs
    sample = next(iter(dataloader))
    if args.dataset == "inpaint":
        # NOTE(review): inpaint batches look like (image, mask) tuples —
        # confirm against the inpaint dataloader.
        example = sample[0]
    else:
        example = sample
    th.save(example.cpu(), os.path.join(logdir, "example_input.pt"))

    # Move the model onto the training device(s) before the smoke test.
    model.to(dist_util.dev([0, 1]) if len(args.devices) > 1 else dist_util.dev())
    model.train()

    cond = {}
    if args.dataset == "inpaint":
        cond = {"mask": sample[1].to(dist_util.dev())}
        batch = sample[0].to(dist_util.dev())
    else:
        batch = sample.to(dist_util.dev())

    # Single-example training step to exercise the full loss pipeline.
    t, _ = schedule_sampler.sample(1, dist_util.dev())
    losses, _, _ = diffusion.training_losses(
        model,
        x_start=batch[:1],
        t=t,
        model_kwargs=cond,
        labels=None,
        mode="inpaint" if args.dataset == "inpaint" else "default",
    )

    loss = losses["mse_wav"].mean()
    if not th.isfinite(loss):
        raise RuntimeError("Non-finite loss encountered during checks")

    # Verify that backprop produces finite gradients everywhere.
    loss.backward()
    for p in model.parameters():
        if p.grad is None:
            continue
        if not th.isfinite(p.grad).all():
            raise RuntimeError("Non-finite gradient encountered during checks")

    logger.log("Pre-training checks passed.")
|
guided_diffusion/resample.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABC, abstractmethod
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch as th
|
| 5 |
+
import torch.distributed as dist
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def create_named_schedule_sampler(name, diffusion, maxt):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.

    :param name: the name of the sampler ("uniform" or "loss-second-moment").
    :param diffusion: the diffusion object to sample for.
    :param maxt: number of timesteps the uniform sampler draws from.
    """
    if name == "uniform":
        return UniformSampler(diffusion, maxt)
    if name == "loss-second-moment":
        return LossSecondMomentResampler(diffusion)
    raise NotImplementedError(f"unknown schedule sampler: {name}")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class ScheduleSampler(ABC):
    """
    A distribution over timesteps in the diffusion process, intended to reduce
    variance of the objective.

    By default, samplers perform unbiased importance sampling, in which the
    objective's mean is unchanged.
    However, subclasses may override sample() to change how the resampled
    terms are reweighted, allowing for actual changes in the objective.
    """

    @abstractmethod
    def weights(self):
        """
        Get a numpy array of weights, one per diffusion step.

        The weights needn't be normalized, but must be positive.
        """

    def sample(self, batch_size, device):
        """
        Importance-sample timesteps for a batch.

        :param batch_size: the number of timesteps.
        :param device: the torch device to save to.
        :return: a tuple (timesteps, weights):
                 - timesteps: a tensor of timestep indices.
                 - weights: a tensor of weights to scale the resulting losses.
        """
        raw = self.weights()
        probs = raw / np.sum(raw)
        drawn = np.random.choice(len(probs), size=(batch_size,), p=probs)
        # Importance weights 1 / (N * p) keep the weighted objective unbiased.
        importance = 1 / (len(probs) * probs[drawn])
        indices = th.from_numpy(drawn).long().to(device)
        weights = th.from_numpy(importance).float().to(device)
        return indices, weights
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class UniformSampler(ScheduleSampler):
    """Samples each of the first ``maxt`` timesteps with equal probability."""

    def __init__(self, diffusion, maxt):
        self.diffusion = diffusion
        self._weights = np.ones([maxt])

    def weights(self):
        return self._weights
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class LossAwareSampler(ScheduleSampler):
    """Schedule sampler whose weights adapt to observed training losses."""

    def update_with_local_losses(self, local_ts, local_losses):
        """
        Update the reweighting using losses from a model.

        Call this method from each rank with a batch of timesteps and the
        corresponding losses for each of those timesteps.
        This method will perform synchronization to make sure all of the ranks
        maintain the exact same reweighting.

        :param local_ts: an integer Tensor of timesteps.
        :param local_losses: a 1D Tensor of losses.
        """
        # Exchange per-rank batch sizes so padding can be stripped later.
        batch_sizes = [
            th.tensor([0], dtype=th.int32, device=local_ts.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(
            batch_sizes,
            th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
        )

        # Pad all_gather batches to be the maximum batch size.
        batch_sizes = [x.item() for x in batch_sizes]
        max_bs = max(batch_sizes)

        timestep_batches = [th.zeros(max_bs).to(local_ts) for _ in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for _ in batch_sizes]
        dist.all_gather(timestep_batches, local_ts)
        dist.all_gather(loss_batches, local_losses)

        # Strip padding and flatten into plain Python lists.
        timesteps = [
            t.item()
            for padded, bs in zip(timestep_batches, batch_sizes)
            for t in padded[:bs]
        ]
        losses = [
            l.item()
            for padded, bs in zip(loss_batches, batch_sizes)
            for l in padded[:bs]
        ]
        self.update_with_all_losses(timesteps, losses)

    @abstractmethod
    def update_with_all_losses(self, ts, losses):
        """
        Update the reweighting using losses from a model.

        Sub-classes should override this method to update the reweighting
        using losses from the model.

        This method directly updates the reweighting without synchronizing
        between workers. It is called by update_with_local_losses from all
        ranks with identical arguments. Thus, it should have deterministic
        behavior to maintain state across workers.

        :param ts: a list of int timesteps.
        :param losses: a list of float losses, one per timestep.
        """
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class LossSecondMomentResampler(LossAwareSampler):
    """Importance-samples timesteps proportionally to the RMS of their recent
    losses, with a small uniform mixing probability so no timestep starves."""

    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
        """
        :param diffusion: diffusion process providing ``num_timesteps``.
        :param history_per_term: number of recent losses kept per timestep.
        :param uniform_prob: probability mass reserved for uniform sampling.
        """
        self.diffusion = diffusion
        self.history_per_term = history_per_term
        self.uniform_prob = uniform_prob
        self._loss_history = np.zeros(
            [diffusion.num_timesteps, history_per_term], dtype=np.float64
        )
        # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # use the explicit fixed-width integer dtype instead.
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)

    def weights(self):
        if not self._warmed_up():
            # Uniform until every timestep has a full loss history.
            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
        # RMS of the recent losses per timestep, normalized to a distribution,
        # then mixed with a small uniform component.
        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
        weights /= np.sum(weights)
        weights *= 1 - self.uniform_prob
        weights += self.uniform_prob / len(weights)
        return weights

    def update_with_all_losses(self, ts, losses):
        for t, loss in zip(ts, losses):
            if self._loss_counts[t] == self.history_per_term:
                # Shift out the oldest loss term.
                self._loss_history[t, :-1] = self._loss_history[t, 1:]
                self._loss_history[t, -1] = loss
            else:
                self._loss_history[t, self._loss_counts[t]] = loss
                self._loss_counts[t] += 1

    def _warmed_up(self):
        # True once every timestep has `history_per_term` recorded losses.
        return (self._loss_counts == self.history_per_term).all()
|
guided_diffusion/respace.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch as th
|
| 3 |
+
|
| 4 |
+
from .gaussian_diffusion import GaussianDiffusion
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith("ddim"):
            desired_count = int(section_counts[len("ddim") :])
            # DDIM striding: find an integer stride that yields exactly
            # `desired_count` steps. (Leftover debug print removed.)
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # BUGFIX: report the requested step count, not the total number
            # of original timesteps, in the error message.
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
        section_counts = [int(x) for x in section_counts.split(",")]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        # Distribute the remainder over the first `extra` sections.
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(
                f"cannot divide section of {size} steps into {section_count}"
            )
        if section_count <= 1:
            frac_stride = 1
        else:
            # Fractional stride so both endpoints of the section are taken.
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """

    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        # Maps indices of the shortened schedule -> original timesteps.
        self.timestep_map = []
        self.original_num_steps = len(kwargs["betas"])

        base_diffusion = GaussianDiffusion(**kwargs)  # pylint: disable=missing-kwoa
        last_alpha_cumprod = 1.0
        new_betas = []
        for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
            if i in self.use_timesteps:
                # Recompute betas so the retained steps reproduce the same
                # cumulative alpha products as the base process.
                new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs["betas"] = np.array(new_betas)
        super().__init__(**kwargs)

    def p_mean_variance(
        self, model, *args, **kwargs
    ):  # pylint: disable=signature-differs
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)

    def training_losses(
        self, model, *args, **kwargs
    ):  # pylint: disable=signature-differs
        return super().training_losses(self._wrap_model(model), *args, **kwargs)

    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)

    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)

    def _wrap_model(self, model):
        # Idempotent: avoid double-wrapping an already wrapped model.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(
            model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
        )

    def _scale_timesteps(self, t):
        # Scaling is done by the wrapped model.
        return t
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class _WrappedModel:
|
| 120 |
+
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
|
| 121 |
+
self.model = model
|
| 122 |
+
self.timestep_map = timestep_map
|
| 123 |
+
self.rescale_timesteps = rescale_timesteps
|
| 124 |
+
self.original_num_steps = original_num_steps
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def __call__(self, x, ts, **kwargs):
|
| 128 |
+
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
|
| 129 |
+
new_ts = map_tensor[ts]
|
| 130 |
+
if self.rescale_timesteps:
|
| 131 |
+
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
|
| 132 |
+
return self.model(x, new_ts, **kwargs)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
|
guided_diffusion/script_util.py
ADDED
|
@@ -0,0 +1,574 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import inspect
|
| 3 |
+
|
| 4 |
+
from . import gaussian_diffusion as gd
|
| 5 |
+
from .respace import SpacedDiffusion, space_timesteps
|
| 6 |
+
from .unet import SuperResModel, UNetModel, EncoderUNetModel
|
| 7 |
+
from .wunet import WavUNetModel
|
| 8 |
+
|
| 9 |
+
NUM_CLASSES = 2
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def diffusion_defaults():
    """Return the default hyperparameters shared by image and classifier training."""
    return {
        "learn_sigma": False,
        "diffusion_steps": 1000,
        "noise_schedule": "linear",
        "timestep_respacing": "",
        "use_kl": False,
        "predict_xstart": False,
        "rescale_timesteps": False,
        "rescale_learned_sigmas": False,
        "dataset": 'brats',
        "dims": 2,
        "num_groups": 32,
        "in_channels": 1,
    }
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def classifier_defaults():
    """Return the default hyperparameters for classifier models."""
    return {
        "image_size": 64,
        "classifier_use_fp16": False,
        "classifier_width": 128,
        "classifier_depth": 2,
        "classifier_attention_resolutions": "32,16,8",  # 16
        "classifier_num_head_channels": 64,
        "classifier_use_scale_shift_norm": True,  # False
        "classifier_resblock_updown": True,  # False
        "classifier_pool": "spatial",
        "classifier_channel_mult": "1,1,2,2,4,4",
        "dataset": 'brats',
    }
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def model_and_diffusion_defaults():
    """Return defaults for image training: model hyperparameters merged with
    the shared diffusion defaults (diffusion values win on key collisions)."""
    defaults = {
        "image_size": 64,
        "num_channels": 128,
        "num_res_blocks": 2,
        "num_heads": 4,
        "num_heads_upsample": -1,
        "num_head_channels": -1,
        "attention_resolutions": "16,8",
        "channel_mult": "",
        "dropout": 0.0,
        "class_cond": False,
        "use_checkpoint": False,
        "use_scale_shift_norm": True,
        "resblock_updown": True,
        "use_fp16": False,
        "use_new_attention_order": False,
        "dims": 2,
        "num_groups": 32,
        "in_channels": 1,
        "out_channels": 0,  # automatically determine if 0
        "bottleneck_attention": True,
        "resample_2d": True,
        "additive_skips": False,
        "mode": 'default',
        "use_freq": False,
        "predict_xstart": False,
    }
    defaults.update(diffusion_defaults())
    return defaults
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def classifier_and_diffusion_defaults():
    """Return classifier defaults merged with the shared diffusion defaults."""
    merged = classifier_defaults()
    merged.update(diffusion_defaults())
    return merged
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def create_model_and_diffusion(
    image_size,
    class_cond,
    learn_sigma,
    num_channels,
    num_res_blocks,
    channel_mult,
    num_heads,
    num_head_channels,
    num_heads_upsample,
    attention_resolutions,
    dropout,
    diffusion_steps,
    noise_schedule,
    timestep_respacing,
    use_kl,
    predict_xstart,
    rescale_timesteps,
    rescale_learned_sigmas,
    use_checkpoint,
    use_scale_shift_norm,
    resblock_updown,
    use_fp16,
    use_new_attention_order,
    dims,
    num_groups,
    in_channels,
    out_channels,
    bottleneck_attention,
    resample_2d,
    additive_skips,
    mode,
    use_freq,
    dataset,
):
    """Build the (model, diffusion) pair from flat hyperparameters.

    Thin orchestration layer: forwards the model-related arguments to
    create_model and the schedule-related arguments to
    create_gaussian_diffusion.

    Returns:
        tuple: (model, diffusion).

    Note: `dataset` is accepted but not forwarded to either factory here —
    presumably kept so the argparse-driven call sites can pass the full
    defaults dict unchanged.
    """
    # Model architecture: UNet (or wavelet UNet when use_freq is set).
    model = create_model(
        image_size,
        num_channels,
        num_res_blocks,
        channel_mult=channel_mult,
        learn_sigma=learn_sigma,
        class_cond=class_cond,
        use_checkpoint=use_checkpoint,
        attention_resolutions=attention_resolutions,
        num_heads=num_heads,
        num_head_channels=num_head_channels,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        dropout=dropout,
        resblock_updown=resblock_updown,
        use_fp16=use_fp16,
        use_new_attention_order=use_new_attention_order,
        dims=dims,
        num_groups=num_groups,
        in_channels=in_channels,
        out_channels=out_channels,
        bottleneck_attention=bottleneck_attention,
        resample_2d=resample_2d,
        additive_skips=additive_skips,
        use_freq=use_freq,
    )
    # Diffusion process: noise schedule, loss type, and timestep respacing.
    diffusion = create_gaussian_diffusion(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
        mode=mode,
    )
    return model, diffusion
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def create_model(
    image_size,
    num_channels,
    num_res_blocks,
    channel_mult="",
    learn_sigma=False,
    class_cond=False,
    use_checkpoint=False,
    attention_resolutions="16",
    num_heads=1,
    num_head_channels=-1,
    num_heads_upsample=-1,
    use_scale_shift_norm=False,
    dropout=0,
    resblock_updown=True,
    use_fp16=False,
    use_new_attention_order=False,
    num_groups=32,
    dims=2,
    in_channels=1,
    out_channels=0,  # automatically determine if 0
    bottleneck_attention=True,
    resample_2d=True,
    additive_skips=False,
    use_freq=False,
):
    """Construct the denoising network.

    Returns a WavUNetModel when `use_freq` is True, otherwise a UNetModel.
    `channel_mult` may be "" (derived from `image_size`), a Python-literal
    string such as "(1, 2, 3, 4)", or an already-parsed tuple.

    Raises:
        ValueError: for an unsupported `image_size` (when channel_mult is
            empty) or an unsupported `channel_mult` value.
    """
    if not channel_mult:
        # Per-resolution channel multiplier presets.
        if image_size == 512:
            channel_mult = (1, 1, 2, 2, 4, 4)
        elif image_size == 256:
            channel_mult = (1, 2, 2, 4, 4, 4)
        elif image_size == 128:
            channel_mult = (1, 2, 2, 4, 4)
        elif image_size == 64:
            channel_mult = (1, 2, 3, 4)
        else:
            raise ValueError(f"[MODEL] Unsupported image size: {image_size}")
    elif isinstance(channel_mult, str):
        # Safely parse a tuple literal from the command line, e.g. "(1,2,4)".
        from ast import literal_eval
        channel_mult = literal_eval(channel_mult)
    elif not isinstance(channel_mult, tuple):
        raise ValueError(f"[MODEL] Value for {channel_mult=} not supported")

    # Convert attention "resolutions" into downsampling factors.
    attention_ds = []
    if attention_resolutions:
        for res in attention_resolutions.split(","):
            attention_ds.append(image_size // int(res))

    if out_channels == 0:
        # BUGFIX: the old default was 2*in_channels when learn_sigma was set,
        # but the uniform doubling below then multiplied by 2 again, yielding
        # 4*in_channels. Derive the base count here; the sigma doubling is
        # applied exactly once below.
        out_channels = in_channels

    # Double the output channels when the model also predicts sigma.
    model_out_channels = out_channels * (2 if learn_sigma else 1)

    # Keyword arguments shared by both model variants.
    common_kwargs = dict(
        image_size=image_size,
        in_channels=in_channels,
        model_channels=num_channels,
        out_channels=model_out_channels,
        num_res_blocks=num_res_blocks,
        attention_resolutions=tuple(attention_ds),
        dropout=dropout,
        channel_mult=channel_mult,
        num_classes=(NUM_CLASSES if class_cond else None),
        use_checkpoint=use_checkpoint,
        use_fp16=use_fp16,
        num_heads=num_heads,
        num_head_channels=num_head_channels,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        resblock_updown=resblock_updown,
        use_new_attention_order=use_new_attention_order,
        dims=dims,
        num_groups=num_groups,
        bottleneck_attention=bottleneck_attention,
        additive_skips=additive_skips,
    )

    if use_freq:
        # Wavelet-domain UNet; does not take resample_2d.
        return WavUNetModel(use_freq=use_freq, **common_kwargs)
    # Standard UNet; does not take use_freq.
    return UNetModel(resample_2d=resample_2d, **common_kwargs)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def create_classifier_and_diffusion(
    image_size,
    classifier_use_fp16,
    classifier_width,
    classifier_depth,
    classifier_attention_resolutions,
    classifier_num_head_channels,
    classifier_use_scale_shift_norm,
    classifier_resblock_updown,
    classifier_pool,
    classifier_channel_mult,
    learn_sigma,
    diffusion_steps,
    noise_schedule,
    timestep_respacing,
    use_kl,
    predict_xstart,
    rescale_timesteps,
    rescale_learned_sigmas,
    dataset,
    dims,
    num_groups,
    in_channels,
):
    """Build the (classifier, diffusion) pair from flat hyperparameters.

    Forwards the classifier_* arguments to create_classifier and the
    schedule-related arguments to create_gaussian_diffusion.

    Returns:
        tuple: (classifier, diffusion).
    """
    # NOTE(review): leftover debug print; consider routing through the logger.
    print('timestepresp2', timestep_respacing)
    classifier = create_classifier(
        image_size,
        classifier_use_fp16,
        classifier_width,
        classifier_depth,
        classifier_attention_resolutions,
        classifier_use_scale_shift_norm,
        classifier_resblock_updown,
        classifier_pool,
        dataset,
        dims=dims,
        num_groups=num_groups,
        in_channels=in_channels,
        num_head_channels=classifier_num_head_channels,
        classifier_channel_mult=classifier_channel_mult,
    )
    diffusion = create_gaussian_diffusion(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
    )
    return classifier, diffusion
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def create_classifier(
    image_size,
    classifier_use_fp16,
    classifier_width,
    classifier_depth,
    classifier_attention_resolutions,
    classifier_use_scale_shift_norm,
    classifier_resblock_updown,
    classifier_pool,
    dataset,
    num_groups=32,
    dims=2,
    in_channels=1,
    num_head_channels=64,
    classifier_channel_mult="",
):
    """Construct an EncoderUNetModel classifier with two output classes.

    `classifier_channel_mult` may be "" (derived from `image_size`), a
    Python-literal string, or an already-parsed tuple.

    Raises:
        ValueError: for an unsupported `image_size` or `channel_mult` value.
    """
    channel_mult = classifier_channel_mult
    if not channel_mult:
        # Per-resolution channel multiplier presets.
        presets = {
            256: (1, 1, 2, 2, 4, 4),
            128: (1, 1, 2, 3, 4),
            64: (1, 2, 3, 4),
        }
        channel_mult = presets.get(image_size)
        if channel_mult is None:
            raise ValueError(f"unsupported image size: {image_size}")
    elif isinstance(channel_mult, str):
        # Safely parse a tuple literal, e.g. "(1,2,4)".
        from ast import literal_eval
        channel_mult = literal_eval(channel_mult)
    elif not isinstance(channel_mult, tuple):
        raise ValueError(f"value for {channel_mult=} not supported")

    # Convert attention "resolutions" into downsampling factors.
    attention_ds = []
    if classifier_attention_resolutions:
        attention_ds = [
            image_size // int(res)
            for res in classifier_attention_resolutions.split(",")
        ]

    print('number_in_channels classifier', in_channels)

    return EncoderUNetModel(
        image_size=image_size,
        in_channels=in_channels,
        model_channels=classifier_width,
        out_channels=2,
        num_res_blocks=classifier_depth,
        attention_resolutions=tuple(attention_ds),
        channel_mult=channel_mult,
        use_fp16=classifier_use_fp16,
        num_head_channels=num_head_channels,
        use_scale_shift_norm=classifier_use_scale_shift_norm,
        resblock_updown=classifier_resblock_updown,
        pool=classifier_pool,
        num_groups=num_groups,
        dims=dims,
    )
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def sr_model_and_diffusion_defaults():
    """Defaults for super-resolution training: the base defaults plus
    large/small sizes, pruned to the arguments that
    sr_create_model_and_diffusion actually accepts."""
    res = model_and_diffusion_defaults()
    res["large_size"] = 256
    res["small_size"] = 64
    accepted = set(inspect.getfullargspec(sr_create_model_and_diffusion)[0])
    for key in list(res):
        if key not in accepted:
            del res[key]
    return res
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def sr_create_model_and_diffusion(
    large_size,
    small_size,
    class_cond,
    learn_sigma,
    num_channels,
    num_res_blocks,
    num_heads,
    num_head_channels,
    num_heads_upsample,
    attention_resolutions,
    dropout,
    diffusion_steps,
    noise_schedule,
    timestep_respacing,
    use_kl,
    predict_xstart,
    rescale_timesteps,
    rescale_learned_sigmas,
    use_checkpoint,
    use_scale_shift_norm,
    resblock_updown,
    use_fp16,
):
    """Build the (model, diffusion) pair for super-resolution training.

    Forwards the architecture arguments to sr_create_model and the
    schedule-related arguments to create_gaussian_diffusion.

    Returns:
        tuple: (model, diffusion).
    """
    # NOTE(review): leftover debug print; consider routing through the logger.
    print('timestepresp3', timestep_respacing)
    model = sr_create_model(
        large_size,
        small_size,
        num_channels,
        num_res_blocks,
        learn_sigma=learn_sigma,
        class_cond=class_cond,
        use_checkpoint=use_checkpoint,
        attention_resolutions=attention_resolutions,
        num_heads=num_heads,
        num_head_channels=num_head_channels,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        dropout=dropout,
        resblock_updown=resblock_updown,
        use_fp16=use_fp16,
    )
    diffusion = create_gaussian_diffusion(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
    )
    return model, diffusion
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def sr_create_model(
    large_size,
    small_size,
    num_channels,
    num_res_blocks,
    learn_sigma,
    class_cond,
    use_checkpoint,
    attention_resolutions,
    num_heads,
    num_head_channels,
    num_heads_upsample,
    use_scale_shift_norm,
    dropout,
    resblock_updown,
    use_fp16,
):
    """Construct a SuperResModel for upsampling to `large_size`.

    Output channels are 3 (or 6 when `learn_sigma` is set, to carry the
    predicted variance as well).

    Raises:
        ValueError: when `large_size` has no channel-multiplier preset.
    """
    _ = small_size  # hack to prevent unused variable

    # Channel multiplier presets keyed by target resolution.
    presets = {
        512: (1, 1, 2, 2, 4, 4),
        256: (1, 1, 2, 2, 4, 4),
        64: (1, 2, 3, 4),
    }
    channel_mult = presets.get(large_size)
    if channel_mult is None:
        raise ValueError(f"unsupported large size: {large_size}")

    # Convert attention "resolutions" into downsampling factors.
    attention_ds = [
        large_size // int(res) for res in attention_resolutions.split(",")
    ]

    return SuperResModel(
        image_size=large_size,
        in_channels=3,
        model_channels=num_channels,
        out_channels=(3 if not learn_sigma else 6),
        num_res_blocks=num_res_blocks,
        attention_resolutions=tuple(attention_ds),
        dropout=dropout,
        channel_mult=channel_mult,
        num_classes=(NUM_CLASSES if class_cond else None),
        use_checkpoint=use_checkpoint,
        num_heads=num_heads,
        num_head_channels=num_head_channels,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        resblock_updown=resblock_updown,
        use_fp16=use_fp16,
    )
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def create_gaussian_diffusion(
    *,
    steps=1000,
    learn_sigma=False,
    sigma_small=False,
    noise_schedule="linear",
    use_kl=False,
    predict_xstart=False,
    rescale_timesteps=False,
    rescale_learned_sigmas=False,
    timestep_respacing="",
    mode='default',
):
    """Assemble a SpacedDiffusion from schedule and loss hyperparameters.

    Loss selection: KL takes precedence over rescaled MSE, which takes
    precedence over plain MSE. An empty `timestep_respacing` means all
    `steps` timesteps are used.
    """
    betas = gd.get_named_beta_schedule(noise_schedule, steps)

    if use_kl:
        loss_type = gd.LossType.RESCALED_KL
    elif rescale_learned_sigmas:
        loss_type = gd.LossType.RESCALED_MSE
    else:
        loss_type = gd.LossType.MSE

    # The network predicts either the clean sample x_0 or the noise epsilon.
    if predict_xstart:
        mean_type = gd.ModelMeanType.START_X
    else:
        mean_type = gd.ModelMeanType.EPSILON

    # Variance is learned (range-parameterized) or fixed large/small.
    if learn_sigma:
        var_type = gd.ModelVarType.LEARNED_RANGE
    elif sigma_small:
        var_type = gd.ModelVarType.FIXED_SMALL
    else:
        var_type = gd.ModelVarType.FIXED_LARGE

    if not timestep_respacing:
        timestep_respacing = [steps]

    return SpacedDiffusion(
        use_timesteps=space_timesteps(steps, timestep_respacing),
        betas=betas,
        model_mean_type=mean_type,
        model_var_type=var_type,
        loss_type=loss_type,
        rescale_timesteps=rescale_timesteps,
        mode=mode,
    )
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
def add_dict_to_argparser(parser, default_dict):
    """Register one --<key> option per dict entry, inferring the argument
    type from the default value (None -> str, bool -> str2bool parser)."""
    for key, default in default_dict.items():
        if default is None:
            arg_type = str
        elif isinstance(default, bool):
            arg_type = str2bool
        else:
            arg_type = type(default)
        parser.add_argument(f"--{key}", default=default, type=arg_type)
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def args_to_dict(args, keys):
    """Extract the named attributes of `args` into a plain dict."""
    extracted = {}
    for key in keys:
        extracted[key] = getattr(args, key)
    return extracted
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def str2bool(v):
    """Parse a human-friendly boolean string for argparse.

    Booleans pass through unchanged; "yes"/"true"/"t"/"y"/"1" (any case)
    map to True, "no"/"false"/"f"/"n"/"0" to False.

    Raises:
        argparse.ArgumentTypeError: for any other value.

    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected")
|
guided_diffusion/train_util.py
ADDED
|
@@ -0,0 +1,484 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import functools
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
import blobfile as bf
|
| 6 |
+
import torch as th
|
| 7 |
+
import torch.distributed as dist
|
| 8 |
+
import torch.utils.tensorboard
|
| 9 |
+
from torch.optim import AdamW
|
| 10 |
+
import torch.cuda.amp as amp
|
| 11 |
+
|
| 12 |
+
import itertools
|
| 13 |
+
|
| 14 |
+
from . import dist_util, logger
|
| 15 |
+
from .resample import LossAwareSampler, UniformSampler
|
| 16 |
+
from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def psnr(pred: th.Tensor, target: th.Tensor, data_range: float = 2.0) -> th.Tensor:
    """Peak signal-to-noise ratio in dB over the whole tensor.

    The MSE is clamped to 1e-8 so identical inputs do not produce log(0).
    `data_range` defaults to 2.0 (values spanning [-1, 1]).
    """
    err = th.clamp(th.mean((pred - target) ** 2), min=1e-8)
    range_db = 20 * th.log10(th.tensor(data_range, device=pred.device))
    return range_db - 10 * th.log10(err)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def dice_score(pred: th.Tensor, target: th.Tensor, threshold: float = 0.0) -> th.Tensor:
    """Dice overlap of the binarized volumes.

    Values strictly greater than `threshold` count as foreground; a small
    epsilon keeps the denominator nonzero for empty masks.
    """
    pred_mask = (pred > threshold).float()
    target_mask = (target > threshold).float()
    overlap = (pred_mask * target_mask).sum()
    return 2 * overlap / (pred_mask.sum() + target_mask.sum() + 1e-8)
|
| 32 |
+
|
| 33 |
+
INITIAL_LOG_LOSS_SCALE = 20.0
|
| 34 |
+
|
| 35 |
+
def visualize(img):
    """Min-max normalize `img` into [0, 1] for display.

    NOTE(review): divides by zero when the image is constant (max == min) —
    callers appear to rely on non-constant inputs; confirm.
    """
    lo = img.min()
    hi = img.max()
    return (img - lo) / (hi - lo)
|
| 40 |
+
|
| 41 |
+
class TrainLoop:
|
| 42 |
+
def __init__(
    self,
    *,
    model,
    diffusion,
    data,
    batch_size,
    in_channels,
    image_size,
    microbatch,
    lr,
    ema_rate,
    log_interval,
    save_interval,
    resume_checkpoint,
    resume_step,
    use_fp16=False,
    fp16_scale_growth=1e-3,
    schedule_sampler=None,
    weight_decay=0.0,
    lr_anneal_steps=0,
    dataset='brats',
    val_data=None,
    val_interval=0,
    summary_writer=None,
    mode='default',
    loss_level='image',
):
    """Set up training-loop state: data iterators, EMA rates, AMP scaler,
    wavelet transforms, and the AdamW optimizer (optionally resumed from a
    checkpoint).

    `ema_rate` may be a single float or a comma-separated string of rates.
    `microbatch` <= 0 disables microbatching (falls back to `batch_size`).
    NOTE(review): `fp16_scale_growth` is accepted but not stored here.
    """
    self.summary_writer = summary_writer
    self.mode = mode
    self.model = model
    self.diffusion = diffusion
    self.datal = data
    self.dataset = dataset
    # Training data iterator; re-created in run_loop on StopIteration.
    self.iterdatal = iter(data)
    self.val_data = val_data
    self.iterval = iter(val_data) if val_data is not None else None
    self.val_interval = val_interval
    self.batch_size = batch_size
    self.in_channels = in_channels
    self.image_size = image_size
    # Non-positive microbatch means "use the full batch".
    self.microbatch = microbatch if microbatch > 0 else batch_size
    self.lr = lr
    # Accept a single float or a comma-separated list of EMA rates.
    self.ema_rate = (
        [ema_rate]
        if isinstance(ema_rate, float)
        else [float(x) for x in ema_rate.split(",")]
    )
    self.log_interval = log_interval
    self.save_interval = save_interval
    self.resume_checkpoint = resume_checkpoint
    self.use_fp16 = use_fp16
    # A GradScaler is always created; it is simply disabled for fp32 so the
    # training step can call it unconditionally.
    if self.use_fp16:
        self.grad_scaler = amp.GradScaler()
    else:
        self.grad_scaler = amp.GradScaler(enabled=False)

    self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
    self.weight_decay = weight_decay
    self.lr_anneal_steps = lr_anneal_steps

    # 3D Haar wavelet transform pair used by the wavelet diffusion model.
    self.dwt = DWT_3D('haar')
    self.idwt = IDWT_3D('haar')

    self.loss_level = loss_level

    self.step = 1
    self.resume_step = resume_step
    self.global_batch = self.batch_size * dist.get_world_size()

    self.sync_cuda = th.cuda.is_available()

    # Load/broadcast model weights BEFORE building the optimizer so the
    # optimizer sees the resumed parameters.
    self._load_and_sync_parameters()

    self.opt = AdamW(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
    if self.resume_step:
        print("Resume Step: " + str(self.resume_step))
        self._load_optimizer_state()

    if not th.cuda.is_available():
        logger.warn(
            "Training requires CUDA. "
        )
|
| 125 |
+
|
| 126 |
+
def _load_and_sync_parameters(self):
    """Optionally load model weights from a resume checkpoint, then broadcast
    the parameters so every distributed rank starts from identical weights."""
    checkpoint = find_resume_checkpoint() or self.resume_checkpoint

    if checkpoint:
        print('resume model ...')
        self.resume_step = parse_resume_step_from_filename(checkpoint)
        # Only rank 0 reads the file; sync_params below broadcasts it.
        if dist.get_rank() == 0:
            logger.log(f"loading model from checkpoint: {checkpoint}...")
            state = dist_util.load_state_dict(checkpoint, map_location=dist_util.dev())
            self.model.load_state_dict(state)

    dist_util.sync_params(self.model.parameters())
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def _load_optimizer_state(self):
    """Restore optimizer state from opt<step>.pt stored next to the resumed
    model checkpoint, if such a file exists."""
    main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
    opt_path = bf.join(bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt")
    if not bf.exists(opt_path):
        print('no optimizer checkpoint exists')
        return
    logger.log(f"loading optimizer state from checkpoint: {opt_path}")
    state = dist_util.load_state_dict(opt_path, map_location=dist_util.dev())
    self.opt.load_state_dict(state)
|
| 156 |
+
|
| 157 |
+
def run_loop(self):
    """
    Main training loop.

    Each iteration: fetch a batch (restarting the data iterator at epoch
    end), move it to the training device, run one optimization step, and
    log timing/loss metrics to TensorBoard. Periodically dumps logger
    key-values, saves checkpoints, and runs validation. Runs until
    ``lr_anneal_steps`` is reached, or forever when it is 0/None.
    """
    import time
    t = time.time()
    while not self.lr_anneal_steps or self.step + self.resume_step < self.lr_anneal_steps:
        t_total = time.time() - t  # wall time of the previous full iteration
        t = time.time()
        if self.dataset in ['brats', 'lidc-idri', 'inpaint']:
            try:
                batch = next(self.iterdatal)
                cond = {}
            except StopIteration:
                # Epoch exhausted: restart the iterator and retry once.
                self.iterdatal = iter(self.datal)
                batch = next(self.iterdatal)
                cond = {}

        if self.dataset == 'inpaint':
            # Inpainting loader returns tuple (image, mask, ...); move each
            # tensor element to the device, pass non-tensors through.
            batch = tuple(b.to(dist_util.dev()) if th.is_tensor(b) else b for b in batch)
        else:
            batch = batch.to(dist_util.dev())

        t_fwd = time.time()
        t_load = t_fwd - t  # data-loading time for this iteration

        lossmse, sample, sample_idwt = self.run_step(batch, cond)

        t_fwd = time.time() - t_fwd  # forward/backward/optimizer time

        # Names of the 8 3D wavelet subbands (L = low-pass, H = high-pass,
        # one letter per spatial axis).
        names = ["LLL", "LLH", "LHL", "LHH", "HLL", "HLH", "HHL", "HHH"]

        if self.summary_writer is not None:
            self.summary_writer.add_scalar('time/load', t_load, global_step=self.step + self.resume_step)
            self.summary_writer.add_scalar('time/forward', t_fwd, global_step=self.step + self.resume_step)
            self.summary_writer.add_scalar('time/total', t_total, global_step=self.step + self.resume_step)
            self.summary_writer.add_scalar('loss/MSE', lossmse.item(), global_step=self.step + self.resume_step)

            # Every 200 steps, log the center slice of the reconstructed
            # volume and of each wavelet subband as images.
            if self.step % 200 == 0:
                image_size = sample_idwt.size()[2]
                midplane = sample_idwt[0, 0, :, :, image_size // 2]
                self.summary_writer.add_image('sample/x_0', midplane.unsqueeze(0),
                                              global_step=self.step + self.resume_step)

                image_size = sample.size()[2]
                for ch in range(8):
                    midplane = sample[0, ch, :, :, image_size // 2]
                    self.summary_writer.add_image('sample/{}'.format(names[ch]), midplane.unsqueeze(0),
                                                  global_step=self.step + self.resume_step)

        if self.step % self.log_interval == 0:
            logger.dumpkvs()

        if self.step % self.save_interval == 0:
            self.save()
            # Run for a finite amount of time in integration tests.
            if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
                return

        if (
            self.val_data is not None
            and self.val_interval > 0
            and self.step % self.val_interval == 0
        ):
            self._run_validation()
        self.step += 1

    # Save the last checkpoint if it wasn't already saved.
    if (self.step - 1) % self.save_interval != 0:
        self.save()
def run_step(self, batch, cond, label=None, info=None):
    """
    Run a single optimization step.

    Performs forward/backward via :meth:`forward_backward`, records
    parameter/gradient norms, steps the optimizer (through the grad
    scaler when fp16 is enabled), anneals the learning rate, and logs
    the step.

    :param batch: batch of training data (tensor, or tuple for 'inpaint').
    :param cond: dict of conditioning kwargs passed to the model.
    :param label: optional labels forwarded to the loss computation.
    :param info: optional dict, filled in place with 'norm/param_max',
        'norm/grad_max' and (fp16 only) 'scale'.
    :return: tuple ``(lossmse, sample, sample_idwt)`` from forward_backward.
    """
    # Fix: the original signature used a mutable default (info=dict()),
    # which is created once and shared across every call.
    if info is None:
        info = dict()

    lossmse, sample, sample_idwt = self.forward_backward(batch, cond, label)

    if self.use_fp16:
        # Unscale so the gradient norms below reflect the true gradients.
        self.grad_scaler.unscale_(self.opt)  # check self.grad_scaler._per_optimizer_states

    # compute norms
    with torch.no_grad():
        param_max_norm = max([p.abs().max().item() for p in self.model.parameters()])
        grad_max_norm = max([p.grad.abs().max().item() for p in self.model.parameters()])
        info['norm/param_max'] = param_max_norm
        info['norm/grad_max'] = grad_max_norm

    if not torch.isfinite(lossmse):  # infinite
        if not torch.isfinite(torch.tensor(param_max_norm)):
            logger.log(f"Model parameters contain non-finite value {param_max_norm}, entering breakpoint", level=logger.ERROR)
            # Deliberate debug hook: drop into the debugger when the model
            # itself has diverged (non-finite parameters).
            breakpoint()
        else:
            logger.log(f"Model parameters are finite, but loss is not: {lossmse}"
                       "\n -> update will be skipped in grad_scaler.step()", level=logger.WARN)

    if self.use_fp16:
        print("Use fp16 ...")
        # GradScaler.step skips the update when gradients are non-finite.
        self.grad_scaler.step(self.opt)
        self.grad_scaler.update()
        info['scale'] = self.grad_scaler.get_scale()
    else:
        self.opt.step()
    self._anneal_lr()
    self.log_step()
    return lossmse, sample, sample_idwt
@th.no_grad()
def _run_validation(self):
    """Run a single validation pass.

    Draws one batch from the validation iterator (restarting it at epoch
    end), computes the diffusion training loss plus PSNR/Dice on the
    (masked) reconstruction, and logs scalars and a side-by-side image
    to TensorBoard. Temporarily switches the model to eval mode.
    """
    if self.val_data is None:
        return

    self.model.eval()
    try:
        batch = next(self.iterval)
    except StopIteration:
        # Validation set exhausted: restart the iterator.
        self.iterval = iter(self.val_data)
        batch = next(self.iterval)

    if self.dataset == 'inpaint':
        # Loader yields (image, mask); evaluate only inside the mask.
        batch = tuple(b.to(dist_util.dev()) if th.is_tensor(b) else b for b in batch)
        cond = {"mask": batch[1]}
        gt = batch[0]
        mask = batch[1]
    else:
        # No mask available: evaluate over the whole volume.
        batch = batch.to(dist_util.dev())
        cond = {}
        gt = batch
        mask = th.ones_like(gt)

    t, _ = self.schedule_sampler.sample(gt.shape[0], dist_util.dev())
    loss_dict, sample_wav, sample_idwt = self.diffusion.training_losses(
        self.model,
        x_start=gt,
        t=t,
        model_kwargs=cond,
        labels=None,
        mode=self.mode,
    )

    # Equally weight all wavelet-subband MSE terms.
    weights = th.ones(len(loss_dict["mse_wav"])).to(sample_idwt.device)
    val_loss = (loss_dict["mse_wav"] * weights).mean().item()

    pred_region = sample_idwt * mask
    gt_region = gt * mask

    # NOTE(review): psnr / dice_score / visualize are defined elsewhere in
    # this module or its imports — not visible here.
    val_psnr = psnr(pred_region, gt_region).item()
    val_dice = dice_score(pred_region, gt_region).item()

    if self.summary_writer is not None:
        self.summary_writer.add_scalar(
            "val/loss", val_loss, global_step=self.step + self.resume_step
        )
        self.summary_writer.add_scalar(
            "val/psnr", val_psnr, global_step=self.step + self.resume_step
        )
        self.summary_writer.add_scalar(
            "val/dice", val_dice, global_step=self.step + self.resume_step
        )

        # Center axial slice: ground truth and prediction side by side.
        mid = pred_region.shape[-1] // 2
        gt_slice = visualize(gt_region[0, 0, :, :, mid])
        pred_slice = visualize(pred_region[0, 0, :, :, mid])
        viz = th.cat([gt_slice, pred_slice], dim=-1)
        self.summary_writer.add_image(
            "val/inpaint_vs_gt",
            viz.unsqueeze(0),
            global_step=self.step + self.resume_step,
        )

    self.model.train()
def forward_backward(self, batch, cond, label=None):
    """
    Compute the training loss over the batch in microbatches and
    accumulate gradients (scaled when fp16 is enabled).

    :param batch: full batch tensor, or (image, mask) tuple for 'inpaint'.
    :param cond: conditioning kwargs (unused here; the 'inpaint' path
        rebuilds per-microbatch conditioning from the mask).
    :param label: optional labels, sliced per microbatch.
    :return: tuple ``(lossmse, sample, sample_idwt)`` — detached scalar
        loss, denoised subbands, and their inverse wavelet transform,
        from the last microbatch processed.
    """
    for p in self.model.parameters():  # Zero out gradient
        p.grad = None

    for i in range(0, batch[0].shape[0] if self.dataset == 'inpaint' else batch.shape[0], self.microbatch):
        if self.dataset == 'inpaint':
            micro = batch[0][i: i + self.microbatch]
            micro_mask = batch[1][i: i + self.microbatch]
            micro_cond = {"mask": micro_mask}
        else:
            micro = batch[i: i + self.microbatch]
            micro_cond = None
        micro = micro.to(dist_util.dev())
        if self.dataset == 'inpaint':
            micro_cond = {k: v.to(dist_util.dev()) for k, v in micro_cond.items()}

        if label is not None:
            micro_label = label[i: i + self.microbatch].to(dist_util.dev())
        else:
            micro_label = None

        # True on the final microbatch of this batch (computed but unused
        # below — kept for parity with the upstream guided-diffusion code).
        last_batch = (
            i + self.microbatch
        ) >= (batch[0].shape[0] if self.dataset == 'inpaint' else batch.shape[0])
        t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())

        compute_losses = functools.partial(self.diffusion.training_losses,
                                           self.model,
                                           x_start=micro,
                                           t=t,
                                           model_kwargs=micro_cond,
                                           labels=micro_label,
                                           mode=self.mode,
                                           )
        losses1 = compute_losses()

        if isinstance(self.schedule_sampler, LossAwareSampler):
            # Feed per-timestep losses back so the sampler can re-weight.
            self.schedule_sampler.update_with_local_losses(
                t, losses1["loss"].detach()
            )

        losses = losses1[0]  # Loss value
        sample = losses1[1]  # Denoised subbands at t=0
        sample_idwt = losses1[2]  # Inverse wavelet transformed denoised subbands at t=0

        # Log wavelet level loss
        # NOTE(review): unlike run_loop, these writes are not guarded by
        # `if self.summary_writer is not None` — confirm a writer is
        # always configured when training.
        self.summary_writer.add_scalar('loss/mse_wav_lll', losses["mse_wav"][0].item(),
                                       global_step=self.step + self.resume_step)
        self.summary_writer.add_scalar('loss/mse_wav_llh', losses["mse_wav"][1].item(),
                                       global_step=self.step + self.resume_step)
        self.summary_writer.add_scalar('loss/mse_wav_lhl', losses["mse_wav"][2].item(),
                                       global_step=self.step + self.resume_step)
        self.summary_writer.add_scalar('loss/mse_wav_lhh', losses["mse_wav"][3].item(),
                                       global_step=self.step + self.resume_step)
        self.summary_writer.add_scalar('loss/mse_wav_hll', losses["mse_wav"][4].item(),
                                       global_step=self.step + self.resume_step)
        self.summary_writer.add_scalar('loss/mse_wav_hlh', losses["mse_wav"][5].item(),
                                       global_step=self.step + self.resume_step)
        self.summary_writer.add_scalar('loss/mse_wav_hhl', losses["mse_wav"][6].item(),
                                       global_step=self.step + self.resume_step)
        self.summary_writer.add_scalar('loss/mse_wav_hhh', losses["mse_wav"][7].item(),
                                       global_step=self.step + self.resume_step)

        # NOTE(review): hard-coded .cuda() — elsewhere this class uses
        # dist_util.dev(); fine on single-GPU CUDA setups only.
        weights = th.ones(len(losses["mse_wav"])).cuda()  # Equally weight all wavelet channel losses

        loss = (losses["mse_wav"] * weights).mean()
        lossmse = loss.detach()

        log_loss_dict(self.diffusion, t, {k: v * weights for k, v in losses.items()})

        # perform some finiteness checks
        if not torch.isfinite(loss):
            logger.log(f"Encountered non-finite loss {loss}")
        if self.use_fp16:
            self.grad_scaler.scale(loss).backward()
        else:
            loss.backward()

    return lossmse.detach(), sample, sample_idwt
def _anneal_lr(self):
|
| 405 |
+
if not self.lr_anneal_steps:
|
| 406 |
+
return
|
| 407 |
+
frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
|
| 408 |
+
lr = self.lr * (1 - frac_done)
|
| 409 |
+
for param_group in self.opt.param_groups:
|
| 410 |
+
param_group["lr"] = lr
|
| 411 |
+
|
| 412 |
+
def log_step(self):
    """Record the global step count and total samples seen so far."""
    global_step = self.step + self.resume_step
    logger.logkv("step", global_step)
    # +1 because the current step's samples have already been consumed.
    logger.logkv("samples", (global_step + 1) * self.global_batch)
def save(self):
    """
    Save model weights and optimizer state as checkpoints.

    Writes ``<logdir>/checkpoints/<dataset>_NNNNNN.pt`` for the model and
    ``<logdir>/checkpoints/optNNNNNN.pt`` for the optimizer; only rank 0
    writes files.

    :raises ValueError: if ``self.dataset`` has no filename mapping.
    """
    def save_checkpoint(rate, state_dict):
        # `rate` is unused in this body (kept for API parity with
        # EMA-rate savers in upstream guided-diffusion).
        if dist.get_rank() == 0:
            logger.log("Saving model...")
            if self.dataset == 'brats':
                filename = f"brats_{(self.step+self.resume_step):06d}.pt"
            elif self.dataset == 'lidc-idri':
                filename = f"lidc-idri_{(self.step+self.resume_step):06d}.pt"
            elif self.dataset == 'inpaint':
                filename = f"inpaint_{(self.step+self.resume_step):06d}.pt"
            else:
                raise ValueError(f'dataset {self.dataset} not implemented')

            with bf.BlobFile(bf.join(get_blob_logdir(), 'checkpoints', filename), "wb") as f:
                th.save(state_dict, f)

    save_checkpoint(0, self.model.state_dict())

    if dist.get_rank() == 0:
        checkpoint_dir = os.path.join(logger.get_dir(), 'checkpoints')
        with bf.BlobFile(
            bf.join(checkpoint_dir, f"opt{(self.step+self.resume_step):06d}.pt"),
            "wb",
        ) as f:
            th.save(self.opt.state_dict(), f)
def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
    checkpoint's number of steps.

    Underscore-separated prefixes are ignored (e.g. ``brats_000500.pt``
    yields 500). Returns 0 when no trailing step number can be found.

    :param filename: checkpoint path or bare filename.
    :return: the parsed step count as an int, or 0.
    """
    parts = os.path.basename(filename).split(".")
    # Fix: the original `parts[-2]` raised IndexError for filenames with
    # no extension; fall back to the whole name in that case.
    stem = parts[-2] if len(parts) >= 2 else parts[-1]
    # Keep only the last underscore-separated word, then its trailing digits.
    stem = stem.split("_")[-1]
    digits = []
    for c in reversed(stem):
        if not c.isdigit():
            break
        digits.append(c)
    if not digits:
        return 0
    return int(''.join(reversed(digits)))
def get_blob_logdir():
    """Return the directory checkpoints are written to (the logger dir)."""
    # You can change this to be a separate path to save checkpoints to
    # a blobstore or some external drive.
    return logger.get_dir()
def find_resume_checkpoint():
    """Return a checkpoint path to auto-resume from, or None to disable."""
    # On your infrastructure, you may want to override this to automatically
    # discover the latest checkpoint on your blob storage, etc.
    return None
def log_loss_dict(diffusion, ts, losses):
    """
    Log each loss's batch mean, plus per-quartile means over the
    diffusion timestep schedule.

    :param diffusion: diffusion process exposing ``num_timesteps``.
    :param ts: tensor of sampled timesteps for the batch.
    :param losses: dict mapping loss names to per-sample loss tensors.
    """
    timesteps = ts.cpu().numpy()
    for name, values in losses.items():
        logger.logkv_mean(name, values.mean().item())
        # Bucket each sample by which quarter of the schedule its t lies in.
        for step_t, sample_loss in zip(timesteps, values.detach().cpu().numpy()):
            quartile = int(4 * step_t / diffusion.num_timesteps)
            logger.logkv_mean(f"{name}_q{quartile}", sample_loss)
guided_diffusion/unet.py
ADDED
|
@@ -0,0 +1,1044 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import abstractmethod
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch as th
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
|
| 9 |
+
from .nn import checkpoint, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding
|
| 10 |
+
from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.

    Serves as the marker interface TimestepEmbedSequential checks with
    isinstance() to decide whether to pass `emb` to a child module.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.
    """

    def forward(self, x, emb):
        h = x
        for module in self:
            # Only TimestepBlock children receive the embedding.
            h = module(h, emb) if isinstance(module, TimestepBlock) else module(h)
        return h
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        upsampling occurs in the inner-two dimensions.
    :param out_channels: if given, number of output channels of the conv.
    :param resample_2d: if True and dims == 3, keep depth and only double
        height and width.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        self.resample_2d = resample_2d
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3 and self.resample_2d:
            # Keep depth fixed; double only the inner two spatial dims.
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            out = F.interpolate(x, target, mode="nearest")
        else:
            out = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(out) if self.use_conv else out
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    :param out_channels: if given, output channels of the strided conv.
    :param resample_2d: if True and dims == 3, keep depth and halve only
        height and width.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # Per-axis stride: keep depth for 3D inputs when resample_2d is set.
        step = (1, 2, 2) if dims == 3 and resample_2d else 2
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=step, padding=1
            )
        else:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=step, stride=step)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class WaveletGatingDownsample(nn.Module):
    """
    A wavelet gated downsampling operation.

    This layer takes some input features and a timestep embedding vector as
    input and outputs the sum over gated wavelet coefficients, thus
    performing a downsampling.

    :param channels: channels in the inputs and outputs.
    :param temb_dim: timestep embedding dimension.
    """

    def __init__(self, channels, temb_dim):
        super().__init__()
        # Forward 3D Haar wavelet transform.
        self.dwt = DWT_3D('haar')

        # Gating network: pooled features + temb -> one gate per subband.
        self.pooling = nn.AdaptiveAvgPool3d(1)
        self.fnn = nn.Sequential(
            nn.Linear(channels + temb_dim, 128),
            nn.SiLU(),
            nn.Linear(128, 8),
        )
        self.act = nn.Sigmoid()

    def forward(self, x, temb):
        # Pool features to a vector, combine with the timestep embedding,
        # and squash through the gating MLP into eight values in (0, 1).
        pooled = self.pooling(x).squeeze(-1).squeeze(-1).squeeze(-1)
        gates = self.act(self.fnn(th.cat((pooled, temb), dim=1)))

        # Gate each wavelet subband and sum them into one half-resolution map.
        bands = self.dwt(x)
        gated = []
        for band, gate in zip(bands, th.split(gates, 1, dim=1)):
            gated.append(band * gate[..., None, None, None])
        return sum(gated)
class WaveletGatingUpsample(nn.Module):
    """
    A wavelet gated upsampling operation.

    This layer takes some input features and a timestep embedding vector as
    input and outputs gated inverse wavelet transformed bands, thus
    performing upsampling.

    :param channels: channels in the inputs and outputs.
    :param temb_dim: timestep embedding dimension.
    """

    def __init__(self, channels, temb_dim):
        super().__init__()
        # Inverse 3D Haar wavelet transform.
        self.idwt = IDWT_3D('haar')

        # Gating network: pooled features + temb -> one gate per subband.
        self.pooling = nn.AdaptiveAvgPool3d(1)
        self.fnn = nn.Sequential(
            nn.Linear(channels + temb_dim, 128),
            nn.SiLU(),
            nn.Linear(128, 8),
        )
        self.act = nn.Sigmoid()

        # 1x1x1 conv expanding channels 8x; the output is chunked into the
        # eight predicted wavelet subbands.
        self.conv_exp = nn.Conv3d(channels, channels * 8, kernel_size=1)

    def forward(self, x, temb):
        # Pool features, combine with the timestep embedding, and produce
        # one sigmoid gate per subband.
        pooled = self.pooling(x).squeeze(-1).squeeze(-1).squeeze(-1)
        gates = self.act(self.fnn(th.cat((pooled, temb), dim=1)))

        # Predict eight subbands via channel expansion, gate each one, and
        # reconstruct the double-resolution output with the inverse DWT.
        bands = self.conv_exp(x).chunk(8, dim=1)
        gated = [band * gate[..., None, None, None]
                 for band, gate in zip(bands, th.split(gates, 1, dim=1))]
        return self.idwt(*gated)
class ResBlock(TimestepBlock):
|
| 186 |
+
"""
|
| 187 |
+
A residual block that can optionally change the number of channels.
|
| 188 |
+
|
| 189 |
+
:param channels: the number of input channels.
|
| 190 |
+
:param emb_channels: the number of timestep embedding channels.
|
| 191 |
+
:param dropout: the rate of dropout.
|
| 192 |
+
:param out_channels: if specified, the number of out channels.
|
| 193 |
+
:param use_conv: if True and out_channels is specified, use a spatial
|
| 194 |
+
convolution instead of a smaller 1x1 convolution to change the
|
| 195 |
+
channels in the skip connection.
|
| 196 |
+
:param dims: determines if the signal is 1D, 2D, or 3D.
|
| 197 |
+
:param use_checkpoint: if True, use gradient checkpointing on this module.
|
| 198 |
+
:param up: if True, use this block for upsampling.
|
| 199 |
+
:param down: if True, use this block for downsampling.
|
| 200 |
+
:param use_wgupdown: if True, use wavelet gated up- and downsampling.
|
| 201 |
+
"""
|
| 202 |
+
|
| 203 |
+
def __init__(
|
| 204 |
+
self,
|
| 205 |
+
channels,
|
| 206 |
+
emb_channels,
|
| 207 |
+
dropout,
|
| 208 |
+
out_channels=None,
|
| 209 |
+
use_conv=False,
|
| 210 |
+
use_scale_shift_norm=False,
|
| 211 |
+
dims=2,
|
| 212 |
+
use_checkpoint=False,
|
| 213 |
+
up=False,
|
| 214 |
+
down=False,
|
| 215 |
+
num_groups=32,
|
| 216 |
+
resample_2d=True,
|
| 217 |
+
):
|
| 218 |
+
super().__init__()
|
| 219 |
+
self.channels = channels
|
| 220 |
+
self.emb_channels = emb_channels
|
| 221 |
+
self.dropout = dropout
|
| 222 |
+
self.out_channels = out_channels or channels
|
| 223 |
+
self.use_conv = use_conv
|
| 224 |
+
self.use_checkpoint = use_checkpoint
|
| 225 |
+
self.use_scale_shift_norm = use_scale_shift_norm
|
| 226 |
+
self.num_groups = num_groups
|
| 227 |
+
|
| 228 |
+
self.in_layers = nn.Sequential(
|
| 229 |
+
normalization(channels, self.num_groups),
|
| 230 |
+
nn.SiLU(),
|
| 231 |
+
conv_nd(dims, channels, self.out_channels, 3, padding=1),
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
self.updown = up or down
|
| 235 |
+
|
| 236 |
+
if up:
|
| 237 |
+
# when using "standard" upsampling
|
| 238 |
+
self.h_upd = Upsample(channels, False, dims, resample_2d=resample_2d)
|
| 239 |
+
self.x_upd = Upsample(channels, False, dims, resample_2d=resample_2d)
|
| 240 |
+
|
| 241 |
+
elif down:
|
| 242 |
+
# when using "standard" downsampling
|
| 243 |
+
self.h_upd = Downsample(channels, False, dims, resample_2d=resample_2d)
|
| 244 |
+
self.x_upd = Downsample(channels, False, dims, resample_2d=resample_2d)
|
| 245 |
+
else:
|
| 246 |
+
self.h_upd = self.x_upd = nn.Identity()
|
| 247 |
+
|
| 248 |
+
self.emb_layers = nn.Sequential(
|
| 249 |
+
nn.SiLU(),
|
| 250 |
+
linear(
|
| 251 |
+
emb_channels,
|
| 252 |
+
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
|
| 253 |
+
),
|
| 254 |
+
)
|
| 255 |
+
self.out_layers = nn.Sequential(
|
| 256 |
+
normalization(self.out_channels, self.num_groups),
|
| 257 |
+
nn.SiLU(),
|
| 258 |
+
nn.Dropout(p=dropout),
|
| 259 |
+
zero_module(
|
| 260 |
+
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
|
| 261 |
+
),
|
| 262 |
+
)
|
| 263 |
+
|
| 264 |
+
if self.out_channels == channels:
|
| 265 |
+
self.skip_connection = nn.Identity()
|
| 266 |
+
elif use_conv:
|
| 267 |
+
self.skip_connection = conv_nd(
|
| 268 |
+
dims, channels, self.out_channels, 3, padding=1
|
| 269 |
+
)
|
| 270 |
+
else:
|
| 271 |
+
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
|
| 272 |
+
|
| 273 |
+
def forward(self, x, emb):
|
| 274 |
+
"""
|
| 275 |
+
Apply the block to a Tensor, conditioned on a timestep embedding.
|
| 276 |
+
|
| 277 |
+
:param x: an [N x C x ...] Tensor of features.
|
| 278 |
+
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
|
| 279 |
+
:return: an [N x C x ...] Tensor of outputs.
|
| 280 |
+
"""
|
| 281 |
+
return checkpoint(
|
| 282 |
+
self._forward, (x, emb), self.parameters(), self.use_checkpoint
|
| 283 |
+
)
|
| 284 |
+
|
| 285 |
+
def _forward(self, x, emb):
    """
    Inner residual-block computation (wrapped by `forward` for checkpointing).

    :param x: an [N x C x ...] Tensor of features.
    :param emb: an [N x emb_channels] Tensor of timestep embeddings.
    :return: an [N x out_channels x ...] Tensor.
    """
    if self.updown:
        # Apply norm/activation first, resample both the hidden features and
        # the skip input, then run the final conv of in_layers on the
        # resampled features.
        in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
        h = in_rest(x)
        h = self.h_upd(h)
        x = self.x_upd(x)
        h = in_conv(h)
    else:
        h = self.in_layers(x)

    emb_out = self.emb_layers(emb).type(h.dtype)

    # Broadcast the [N x C] embedding over all spatial dims of h.
    while len(emb_out.shape) < len(h.shape):
        emb_out = emb_out[..., None]

    if self.use_scale_shift_norm:
        # BUGFIX: removed an unconditional debug print that fired on every
        # forward pass when scale-shift norm was enabled.
        # FiLM-style conditioning: emb provides a per-channel scale and shift
        # applied right after normalization.
        out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
        scale, shift = th.chunk(emb_out, 2, dim=1)
        h = out_norm(h) * (1 + scale) + shift
        h = out_rest(h)

    else:
        # Additive conditioning: embedding is simply added to the features.
        h = h + emb_out
        h = self.out_layers(h)

    return self.skip_connection(x) + h
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.

    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.

    :param channels: number of input/output channels.
    :param num_heads: number of attention heads (ignored if
        num_head_channels != -1).
    :param num_head_channels: if not -1, fixed channel width per head;
        must divide `channels`.
    :param use_checkpoint: enable gradient checkpointing for this block.
    :param use_new_attention_order: split qkv before splitting heads.
    :param num_groups: group count for the GroupNorm layer.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
        num_groups=32,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels, num_groups)
        # a single 1x1 conv produces q, k and v in one pass
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before splitting heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        # zero-init so the block starts as an identity (residual output is 0)
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # BUGFIX: previously passed a hard-coded `True` as the checkpoint
        # flag, so `use_checkpoint` was stored but ignored and gradient
        # checkpointing was always enabled for attention.
        return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)

    def _forward(self, x):
        b, c, *spatial = x.shape
        # flatten all spatial dims into a single sequence axis
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        # residual connection, restoring the original spatial shape
        return (x + h).reshape(b, c, *spatial)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, chans, *spatial = y[0].shape
    seq_len = int(np.prod(spatial))
    # Attention performs two matmuls of identical cost: one builds the
    # [T x T] weight matrix, the other combines the value vectors.
    flops = 2 * batch * (seq_len ** 2) * chans
    model.total_ops += th.DoubleTensor([flops])
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
    (heads are folded into the batch dimension *before* the q/k/v split).
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, seq_len = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        # Fold heads into the batch axis, then carve out q, k, v.
        q, k, v = qkv.reshape(batch * self.n_heads, head_dim * 3, seq_len).split(head_dim, dim=1)
        # Scale q and k symmetrically by ch^-1/4 each:
        # more stable with f16 than dividing the product afterwards.
        norm = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum("bct,bcs->bts", q * norm, k * norm)
        # Softmax in fp32 for numerical stability, cast back afterwards.
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", probs, v)
        return out.reshape(batch, -1, seq_len)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order
    (q/k/v are split *before* the heads are folded into the batch axis).
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, seq_len = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        # q/k/v first, heads second (the "new" ordering).
        q, k, v = qkv.chunk(3, dim=1)
        # Symmetric ch^-1/4 scaling of q and k:
        # more stable with f16 than dividing the product afterwards.
        norm = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum(
            "bct,bcs->bts",
            (q * norm).view(batch * self.n_heads, head_dim, seq_len),
            (k * norm).view(batch * self.n_heads, head_dim, seq_len),
        )
        # Softmax in fp32 for numerical stability, cast back afterwards.
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", probs, v.reshape(batch * self.n_heads, head_dim, seq_len))
        return out.reshape(batch, -1, seq_len)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.

    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use
        a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
        of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
        increased efficiency.
    :param num_groups: group count for every GroupNorm layer.
    :param bottleneck_attention: if False, omit the attention block in the bottleneck.
    :param resample_2d: forwarded to Upsample/Downsample/ResBlock; presumably
        restricts resampling to the inner two spatial dims -- confirm in those layers.
    :param additive_skips: if True, average skip connections into the decoder
        instead of concatenating them along the channel axis.
    :param decoder_device_thresh: index of the first output block moved to the
        second device when `to()` is called with a list of two devices.

    NOTE(review): `use_fp16` is accepted but never stored or used in this class.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        num_groups=32,
        bottleneck_attention=True,
        resample_2d=True,
        additive_skips=False,
        decoder_device_thresh=0,
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.num_groups = num_groups
        self.bottleneck_attention = bottleneck_attention
        # devices is filled in by to(); [encoder device, decoder device]
        self.devices = None
        self.decoder_device_thresh = decoder_device_thresh
        self.additive_skips = additive_skips

        # Timestep embedding MLP: model_channels -> 4*model_channels.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        # Stem: a single conv lifting the input to model_channels.
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        # Channel count after each encoder block, consumed (popped) by the decoder.
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current downsampling factor

        ###############################################################
        # INPUT block
        ###############################################################
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        channels=ch,
                        emb_channels=time_embed_dim,
                        dropout=dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        num_groups=self.num_groups,
                        resample_2d=resample_2d,
                    )
                ]
                ch = mult * model_channels
                # Attach attention at the configured downsample rates.
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                            num_groups=self.num_groups,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            # Downsample between levels (not after the last one).
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                            num_groups=self.num_groups,
                            resample_2d=resample_2d,
                        )
                        if resblock_updown
                        else Downsample(
                            ch,
                            conv_resample,
                            dims=dims,
                            out_channels=out_ch,
                            resample_2d=resample_2d,
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        # Keep a copy of the encoder channel trace (input_block_chans itself
        # is destructively popped while building the decoder below).
        self.input_block_chans_bk = input_block_chans[:]
        ################################################################
        # Middle block
        ################################################################
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                num_groups=self.num_groups,
                resample_2d=resample_2d,
            ),
            # Bottleneck attention is optional; the splat of a possibly-empty
            # list inserts it only when bottleneck_attention is True.
            *([AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
                num_groups=self.num_groups,
            )] if self.bottleneck_attention else [])
            ,
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                num_groups=self.num_groups,
                resample_2d=resample_2d,
            ),
        )
        self._feature_size += ch

        ####################################################################
        # OUTPUT BLOCKS
        ####################################################################
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                # Channel count of the matching encoder skip connection.
                ich = input_block_chans.pop()
                # With additive skips the decoder must match the *next* skip's
                # channel count so that (h + skip) is well-formed.
                mid_ch = model_channels * mult if not self.additive_skips else (
                    input_block_chans[-1] if input_block_chans else model_channels
                )
                layers = [
                    ResBlock(
                        # concat skips widen the input; additive skips do not
                        ch + ich if not self.additive_skips else ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mid_ch,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        num_groups=self.num_groups,
                        resample_2d=resample_2d,
                    )
                ]
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            mid_ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                            num_groups=self.num_groups,
                        )
                    )
                ch = mid_ch
                # Upsample at the end of every level except the outermost.
                if level and i == num_res_blocks:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            mid_ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                            num_groups=self.num_groups,
                            resample_2d=resample_2d,
                        )
                        if resblock_updown
                        else Upsample(
                            mid_ch,
                            conv_resample,
                            dims=dims,
                            out_channels=out_ch,
                            resample_2d=resample_2d
                        )
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                # NOTE(review): this assignment appears to be dead code --
                # mid_ch is recomputed at the top of the next iteration.
                mid_ch = ch

        # Final projection back to out_channels.
        # NOTE(review): the conv takes model_channels inputs while the
        # normalization uses ch -- this assumes ch == model_channels at the
        # end of the decoder (true when channel_mult[0] == 1); confirm for
        # other configurations.
        self.out = nn.Sequential(
            normalization(ch, self.num_groups),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )

    def to(self, *args, **kwargs):
        """
        we overwrite the to() method for the case where we
        distribute parts of our model to different devices

        When called with a list/tuple of two devices, the encoder, time
        embedding, middle block and final head go to the first device, and
        output blocks from index `decoder_device_thresh` onward go to the
        second. Any other call falls through to nn.Module.to().
        """
        if isinstance(args[0], (list, tuple)) and len(args[0]) > 1:
            assert not kwargs and len(args) == 1
            # distribute to multiple devices
            self.devices = args[0]
            # move first half to first device, second half to second device
            self.input_blocks.to(self.devices[0])
            self.time_embed.to(self.devices[0])
            self.middle_block.to(self.devices[0])  # maybe devices 0
            for k, b in enumerate(self.output_blocks):
                if k < self.decoder_device_thresh:
                    b.to(self.devices[0])
                else:  # after threshold
                    b.to(self.devices[1])
            self.out.to(self.devices[0])
            print(f"distributed UNet components to devices {self.devices}")

        else:  # default behaviour
            super().to(*args, **kwargs)
            if self.devices is None:  # if self.devices has not been set yet, read it from params
                p = next(self.parameters())
                self.devices = [p.device, p.device]

    def forward(self, x, timesteps, y=None):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        assert x.device == self.devices[0], f"{x.device=} does not match {self.devices[0]=}"
        assert timesteps.device == self.devices[0], f"{timesteps.device=} does not match {self.devices[0]=}"

        hs = []  # encoder activations, consumed as skip connections
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        h = x
        self.hs_shapes = []  # recorded for debugging/inspection
        for module in self.input_blocks:
            h = module(h, emb)
            hs.append(h)
            self.hs_shapes.append(h.shape)

        h = self.middle_block(h, emb)

        for k, module in enumerate(self.output_blocks):
            new_hs = hs.pop()
            # Hand over to the second device once the threshold block is reached.
            if k == self.decoder_device_thresh:
                h = h.to(self.devices[1])
                emb = emb.to(self.devices[1])
            if k >= self.decoder_device_thresh:
                new_hs = new_hs.to(self.devices[1])

            if self.additive_skips:
                # average instead of concatenating (channel counts must match)
                h = (h + new_hs) / 2
            else:
                h = th.cat([h, new_hs], dim=1)

            h = module(h, emb)
        # bring the result back to the first device for the output head
        h = h.to(self.devices[0])
        return self.out(h)
|
| 801 |
+
|
| 802 |
+
|
| 803 |
+
class SuperResModel(UNetModel):
    """
    A UNetModel that performs super-resolution.

    Expects an extra kwarg `low_res` to condition on a low-resolution image.
    """

    def __init__(self, image_size, in_channels, *args, **kwargs):
        # The channel count doubles: the noisy input is concatenated with the
        # upsampled low-resolution conditioning image before entering the UNet.
        super().__init__(image_size, in_channels * 2, *args, **kwargs)

    def forward(self, x, timesteps, low_res=None, **kwargs):
        # Bring the conditioning image up to the target spatial size.
        _, _, target_h, target_w = x.shape
        conditioning = F.interpolate(low_res, (target_h, target_w), mode="bilinear")
        stacked = th.cat([x, conditioning], dim=1)
        return super().forward(stacked, timesteps, **kwargs)
|
| 818 |
+
|
| 819 |
+
|
| 820 |
+
class EncoderUNetModel(nn.Module):
    """
    The half UNet model with attention and timestep embedding.

    For usage, see UNet.

    Encoder-only variant: input blocks + middle block followed by a pooling
    head selected by `pool` ("adaptive", "attention", "spatial", "spatial_v2").
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        pool="adaptive",
        num_groups=32,
        resample_2d=True,
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        # compute dtype used to cast the input in forward()
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.num_groups = num_groups

        # Timestep embedding MLP: model_channels -> 4*model_channels.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        # Stem conv lifting the input to model_channels.
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current downsampling factor
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        num_groups=self.num_groups,
                        resample_2d=resample_2d,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                            num_groups=self.num_groups,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                            num_groups=self.num_groups,
                            resample_2d=resample_2d,
                        )
                        if resblock_updown
                        # NOTE(review): unlike UNetModel, this Downsample call
                        # does not pass resample_2d -- confirm whether the
                        # omission is intentional.
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch,
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                num_groups=self.num_groups,
                resample_2d=resample_2d,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
                num_groups=self.num_groups,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                num_groups=self.num_groups,
                resample_2d=resample_2d,
            ),
        )
        self._feature_size += ch
        self.pool = pool
        # global average pooling
        # over the spatial axes only: (2, 3) for 2D, (2, 3, 4) for 3D, etc.
        spatial_dims = (2, 3, 4, 5)[:dims]
        self.gap = lambda x: x.mean(dim=spatial_dims)
        # last bottleneck feature map, kept for CAM-style visualization
        self.cam_feature_maps = None
        print('pool', pool)
        if pool == "adaptive":
            # NOTE(review): AdaptiveAvgPool2d assumes dims == 2; a 3D input
            # would need AdaptiveAvgPool3d -- confirm intended usage.
            self.out = nn.Sequential(
                normalization(ch, self.num_groups),
                nn.SiLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                zero_module(conv_nd(dims, ch, out_channels, 1)),
                nn.Flatten(),
            )
        elif pool == "attention":
            assert num_head_channels != -1
            self.out = nn.Sequential(
                normalization(ch, self.num_groups),
                nn.SiLU(),
                AttentionPool2d(
                    (image_size // ds), ch, num_head_channels, out_channels
                ),
            )
        elif pool == "spatial":
            print('spatial')
            # NOTE(review): the input width 256 is hard-coded; it must equal
            # the flattened bottleneck feature size for the chosen config.
            self.out = nn.Linear(256, self.out_channels)
        elif pool == "spatial_v2":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                normalization(2048, self.num_groups),
                nn.SiLU(),
                nn.Linear(2048, self.out_channels),
            )
        else:
            raise NotImplementedError(f"Unexpected {pool} pooling")



    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x K] Tensor of outputs.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        # NOTE(review): `results` is collected for the "spatial" pooling modes
        # but never read afterwards in this implementation.
        results = []
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            if self.pool.startswith("spatial"):
                results.append(h.type(x.dtype).mean(dim=(2, 3)))
        h = self.middle_block(h, emb)


        if self.pool.startswith("spatial"):
            self.cam_feature_maps = h
            # global average pool over spatial dims, then flatten per sample
            h = self.gap(h)
            N = h.shape[0]
            h = h.reshape(N, -1)
            print('h1', h.shape)
            return self.out(h)
        else:
            h = h.type(x.dtype)
            self.cam_feature_maps = h
            return self.out(h)
|
guided_diffusion/wunet.py
ADDED
|
@@ -0,0 +1,795 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import abstractmethod
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch as th
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
|
| 9 |
+
from .nn import checkpoint, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding
|
| 10 |
+
from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.

    Acts as a marker base class: ``TimestepEmbedSequential`` checks
    ``isinstance(layer, TimestepBlock)`` to decide whether to pass the
    timestep embedding to a child (e.g. ``ResBlock`` subclasses this).
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container that threads the timestep embedding `emb` into
    every child that is a `TimestepBlock`; all other children receive the
    running activation only.
    """

    def forward(self, x, emb):
        out = x
        for child in self:
            # Only TimestepBlock children accept the embedding as a second input.
            out = child(out, emb) if isinstance(child, TimestepBlock) else child(out)
        return out
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class Upsample(nn.Module):
    """
    A wavelet upsampling layer with an optional convolution on the skip connections used to perform upsampling.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied to the 7 high-frequency subbands.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    :param out_channels: if given, output channels of the subband convolution (defaults to ``channels``).
    :param resample_2d: for 3D signals, upsample only the two inner spatial dims.
    :param use_freq: if True, upsample via the inverse 3D Haar wavelet transform;
                     this requires the input to be a ``(low_band, subbands)`` tuple.

    forward() returns a ``(tensor, None)`` tuple so it can be chained with blocks
    that pipe wavelet skip connections.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True, use_freq=True):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        self.resample_2d = resample_2d

        self.use_freq = use_freq
        self.idwt = IDWT_3D("haar")

        # Grouped convolution on 7 high frequency subbands (skip connections)
        if use_conv:
            self.conv = conv_nd(dims, self.channels * 7, self.out_channels * 7, 3, padding=1, groups=7)

    def forward(self, x):
        # The 7 high-frequency subbands (skip connections) arrive as x[1].
        # Initialize to None so that a non-tuple input fails with a clear
        # assertion below instead of an UnboundLocalError.
        skip = None
        if isinstance(x, tuple):
            skip = x[1]
            x = x[0]
        assert x.shape[1] == self.channels

        if self.use_conv:
            assert skip is not None, "Upsample with use_conv=True expects a (low_band, subbands) tuple input"
            # Convolve the concatenated subbands at 1/3 scale, then undo the scaling.
            skip = self.conv(th.cat(skip, dim=1) / 3.) * 3.
            skip = tuple(th.chunk(skip, 7, dim=1))

        if self.use_freq:
            assert skip is not None, "Upsample with use_freq=True expects a (low_band, subbands) tuple input"
            # Invert the DWT; the low band was divided by 3 on the way down,
            # so multiply by 3 before reconstruction.
            x = self.idwt(3. * x, skip[0], skip[1], skip[2], skip[3], skip[4], skip[5], skip[6])
        else:
            # Plain nearest-neighbour upsampling fallback.
            if self.dims == 3 and self.resample_2d:
                x = F.interpolate(
                    x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
                )
            else:
                x = F.interpolate(x, scale_factor=2, mode="nearest")

        return x, None
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class Downsample(nn.Module):
    """
    A wavelet downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a strided convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    :param out_channels: if given, output channels of the strided convolution (defaults to ``channels``).
    :param resample_2d: for 3D signals, downsample only the two inner spatial dims.
    :param use_freq: if True (and ``use_conv`` is False), downsample via the 3D Haar
                     DWT and return a ``(low_band, subbands)`` tuple instead of a tensor.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True, use_freq=True):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims

        self.use_freq = use_freq
        self.dwt = DWT_3D("haar")

        stride = (1, 2, 2) if dims == 3 and resample_2d else 2

        # Operator priority: conv > wavelet > average pooling.
        if use_conv:
            self.op = conv_nd(dims, self.channels, self.out_channels, 3, stride=stride, padding=1)
        elif self.use_freq:
            self.op = self.dwt
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        # Take the wavelet branch only when self.op actually is the DWT.
        # __init__ installs a strided conv when use_conv=True (use_conv has
        # priority), so unconditionally unpacking 8 subbands here would crash
        # with the default use_freq=True.
        if self.use_freq and not self.use_conv:
            LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = self.op(x)
            # Scale the low band by 1/3 and hand the 7 high-frequency subbands
            # to the matching Upsample as skip connections.
            x = (LLL / 3., (LLH, LHL, LHH, HLL, HLH, HHL, HHH))
        else:
            x = self.op(x)
        return x
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class WaveletDownsample(nn.Module):
    """
    Implements the wavelet downsampling blocks used to generate the input residuals.

    :param in_ch: number of input channels.
    :param out_ch: number of output channels (should match the feature size of the
                   corresponding U-Net level); defaults to ``in_ch``.
    """

    def __init__(self, in_ch=None, out_ch=None):
        super().__init__()
        if not out_ch:
            out_ch = in_ch
        self.in_ch = in_ch
        self.out_ch = out_ch
        # The 3D DWT yields 8 subbands, hence 8 * in_ch channels into the conv.
        self.conv = conv_nd(3, self.in_ch * 8, self.out_ch, 3, stride=1, padding=1)
        self.dwt = DWT_3D('haar')

    def forward(self, x):
        # Stack all 8 Haar subbands along the channel axis, rescale by 1/3,
        # then mix them with a 3x3x3 convolution.
        bands = self.dwt(x)
        stacked = th.cat(bands, dim=1) / 3.
        return self.conv(stacked)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels via up- or downsampling.

    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels, otherwise out_channels = channels.
    :param use_conv: stored but currently unread in this block (see NOTE below).
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    :param num_groups: if specified, the number of groups in the (adaptive) group normalization layers.
    :param resample_2d: for 3D signals, resample only the two inner spatial dimensions.
    :param use_freq: specifies if frequency aware up- or downsampling should be used.

    forward() accepts either a tensor or a ``(tensor, subbands)`` tuple and always
    returns a ``(tensor, subbands_or_None)`` tuple, so that wavelet skip
    connections can be piped through chains of blocks.
    """

    def __init__(self, channels, emb_channels, dropout, out_channels=None, use_conv=True, use_scale_shift_norm=False,
                 dims=2, use_checkpoint=False, up=False, down=False, num_groups=32, resample_2d=True, use_freq=False):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        # NOTE(review): use_conv is stored but never read anywhere in this
        # class — the skip connection below is always a 1x1 conv (or identity).
        self.use_conv = use_conv
        self.use_scale_shift_norm = use_scale_shift_norm
        self.use_checkpoint = use_checkpoint
        self.up = up
        self.down = down
        self.num_groups = num_groups
        self.use_freq = use_freq

        # Define (adaptive) group normalization layers
        # (in_layers also performs the channel adaption channels -> out_channels)
        self.in_layers = nn.Sequential(
            normalization(channels, self.num_groups),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        # Check if up- or downsampling should be performed by this ResBlock
        self.updown = up or down
        if up:
            self.h_upd = Upsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
            self.x_upd = Upsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
        elif down:
            self.h_upd = Downsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
            self.x_upd = Downsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        # Define the timestep embedding layers
        # (2x out_channels when scale-shift norm needs both a scale and a shift)
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels),
        )

        # Define output layers including (adaptive) group normalization
        # (final conv is zero-initialized so the block starts as an identity residual)
        self.out_layers = nn.Sequential(
            normalization(self.out_channels, self.num_groups),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)),
        )

        # Define skip branch
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, temb):
        """
        Apply the block to `x` (tensor or (tensor, subbands) tuple) conditioned
        on the timestep embedding `temb`; returns a (tensor, subbands) tuple.
        """
        # Make sure to pipe skip connections
        if isinstance(x, tuple):
            hSkip = x[1]
        else:
            hSkip = None

        # Forward pass for ResBlock with up- or downsampling
        if self.updown:
            if self.up:
                x = x[0]
            h = self.in_layers(x)

            if self.up:
                # Re-attach the subbands so Upsample can consume them.
                h = (h, hSkip)
                x = (x, hSkip)

            h, hSkip = self.h_upd(h)  # Updown in main branch (ResBlock)
            x, xSkip = self.x_upd(x)  # Updown in skip-connection (ResBlock)
            # NOTE(review): with down=True and use_freq=False, Downsample returns a
            # single tensor, so this 2-tuple unpacking looks like it would misbehave —
            # confirm that updown blocks are only instantiated with use_freq=True.

        # Forward pass for standard ResBlock
        else:
            if isinstance(x, tuple):  # Check for skip connection tuple
                x = x[0]
            h = self.in_layers(x)

        # Common layers for both standard and updown ResBlocks
        emb_out = self.emb_layers(temb)

        # Broadcast the embedding over all spatial dims.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]

        if self.use_scale_shift_norm:
            # FiLM-style conditioning: norm, then scale & shift from the embedding.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)

        else:
            h = h + emb_out  # Add timestep embedding
            h = self.out_layers(h)  # Forward pass out layers

        # Add skip connections
        out = self.skip_connection(x) + h
        out = out, hSkip

        return out
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.

    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
        num_groups=32,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels != -1:
            # Derive the head count from a fixed per-head width.
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        else:
            self.num_heads = num_heads
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels, num_groups)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        # New order splits heads before splitting qkv; legacy splits qkv first.
        attn_cls = QKVAttention if use_new_attention_order else QKVAttentionLegacy
        self.attention = attn_cls(self.num_heads)

        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # Always checkpoint the attention computation to save activation memory.
        return checkpoint(self._forward, (x,), self.parameters(), True)

    def _forward(self, x):
        batch, chans, *spatial = x.shape
        # Flatten all spatial dims into one token axis for 1D attention.
        flat = x.reshape(batch, chans, -1)
        attended = self.attention(self.qkv(self.norm(flat)))
        residual = self.proj_out(attended)
        return (flat + residual).reshape(batch, chans, *spatial)
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, chans, *spatial = y[0].shape
    tokens = int(np.prod(spatial))
    # Two matmuls with identical cost: one forms the attention weight matrix,
    # the other combines the value vectors.
    flops = 2 * batch * (tokens ** 2) * chans
    model.total_ops += th.DoubleTensor([flops])
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, seq_len = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        # Legacy order: fold heads into the batch dim first, then split q/k/v.
        per_head = qkv.reshape(batch * self.n_heads, head_dim * 3, seq_len)
        q, k, v = per_head.split(head_dim, dim=1)
        # Scale q and k symmetrically by ch^(-1/4) each: more stable with f16
        # than dividing the product by sqrt(ch) afterwards.
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", probs, v)
        return out.reshape(batch, -1, seq_len)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, seq_len = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        # New order: split q/k/v first, then fold heads into the batch dim.
        q, k, v = qkv.chunk(3, dim=1)
        # Symmetric ch^(-1/4) scaling of q and k: more stable with f16 than
        # dividing the logits by sqrt(ch) afterwards.
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(batch * self.n_heads, head_dim, seq_len),
            (k * scale).view(batch * self.n_heads, head_dim, seq_len),
        )
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", probs, v.reshape(batch * self.n_heads, head_dim, seq_len))
        return out.reshape(batch, -1, seq_len)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
class WavUNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.

    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set,
                                  list, or tuple. For example, if this contains 4, then at 4x downsampling, attention
                                  will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially increased efficiency.
    :param additive_skips: if True, merge encoder features into the decoder by averaging instead of concatenation.
    :param decoder_device_thresh: index of the first output block placed on the second device when the model is
                                  distributed via to([dev0, dev1]).
    :param use_freq: if True, use wavelet-based (frequency aware) resampling and skip connections.
    :param progressive_input: 'residual' enables the WaveletDownsample input pyramid (input residual branch).
    """

    def __init__(self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions,
                 dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None,
                 use_checkpoint=False, use_fp16=False, num_heads=1, num_head_channels=-1, num_heads_upsample=-1,
                 use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, num_groups=32,
                 bottleneck_attention=True, resample_2d=True, additive_skips=False, decoder_device_thresh=0,
                 use_freq=False, progressive_input='residual'):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        # self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        # self.num_heads = num_heads
        # self.num_head_channels = num_head_channels
        # self.num_heads_upsample = num_heads_upsample
        self.num_groups = num_groups
        self.bottleneck_attention = bottleneck_attention
        self.devices = None
        self.decoder_device_thresh = decoder_device_thresh
        self.additive_skips = additive_skips
        self.use_freq = use_freq
        self.progressive_input = progressive_input

        #############################
        # TIMESTEP EMBEDDING layers #
        #############################
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim))

        ###############
        # INPUT block #
        ###############
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )

        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        input_pyramid_channels =in_channels
        ds = 1

        ######################################
        # DOWNWARD path - Feature extraction #
        ######################################
        # NOTE(review): a downsampling stage is appended after EVERY level,
        # including the last one — confirm this matches the intended design.
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):  # Adding Residual blocks
                layers = [
                    ResBlock(
                        channels=ch,
                        emb_channels=time_embed_dim,
                        dropout=dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        num_groups=self.num_groups,
                        resample_2d=resample_2d,
                        use_freq=self.use_freq,
                    )
                ]
                ch = mult * model_channels  # New input channels = channel_mult * base_channels
                # (first ResBlock performs channel adaption)

                if ds in attention_resolutions:  # Adding Attention layers
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                            num_groups=self.num_groups,
                        )
                    )

                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)

            # Adding downsampling operation
            # NOTE(review): the standalone Downsample below is created without a
            # use_freq argument, so it uses Downsample's own default — verify
            # this is intended when self.use_freq is False.
            out_ch = ch
            layers = []
            layers.append(
                ResBlock(
                    ch,
                    time_embed_dim,
                    dropout,
                    out_channels=out_ch,
                    dims=dims,
                    use_checkpoint=use_checkpoint,
                    use_scale_shift_norm=use_scale_shift_norm,
                    down=True,
                    num_groups=self.num_groups,
                    resample_2d=resample_2d,
                    use_freq=self.use_freq,
                )
                if resblock_updown
                else Downsample(
                    ch,
                    conv_resample,
                    dims=dims,
                    out_channels=out_ch,
                    resample_2d=resample_2d,
                )
            )
            self.input_blocks.append(TimestepEmbedSequential(*layers))

            # Wavelet input pyramid branch: downsamples the raw input alongside
            # the features so forward() can add it as an input residual.
            layers = []
            if self.progressive_input == 'residual':
                layers.append(WaveletDownsample(in_ch=input_pyramid_channels, out_ch=out_ch))
                input_pyramid_channels = out_ch

            # NOTE(review): when progressive_input != 'residual' this appends an
            # EMPTY TimestepEmbedSequential, and forward() indexes module[0] —
            # confirm non-residual configs are actually supported.
            self.input_blocks.append(TimestepEmbedSequential(*layers))

            ch = out_ch
            input_block_chans.append(ch)
            ds *= 2
            self._feature_size += ch

        # Keep a backup copy of the per-block channel counts.
        self.input_block_chans_bk = input_block_chans[:]

        #########################
        # LATENT/ MIDDLE blocks #
        #########################
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                num_groups=self.num_groups,
                resample_2d=resample_2d,
                use_freq=self.use_freq,
            ),
            # Optional bottleneck self-attention.
            *([AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
                num_groups=self.num_groups,
            )] if self.bottleneck_attention else [])
            ,
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                num_groups=self.num_groups,
                resample_2d=resample_2d,
                use_freq=self.use_freq,
            ),
        )
        self._feature_size += ch

        #################################
        # UPWARD path - feature mapping #
        #################################
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks+1):  # Adding Residual blocks
                if not i == num_res_blocks:
                    mid_ch = model_channels * mult

                    layers = [
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=mid_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            num_groups=self.num_groups,
                            resample_2d=resample_2d,
                            use_freq=self.use_freq,
                        )
                    ]
                    if ds in attention_resolutions:  # Adding Attention layers
                        layers.append(
                            AttentionBlock(
                                mid_ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads_upsample,
                                num_head_channels=num_head_channels,
                                use_new_attention_order=use_new_attention_order,
                                num_groups=self.num_groups,
                            )
                        )
                    ch = mid_ch
                else:  # Adding upsampling operation
                    # NOTE(review): this appends the Upsample to the SAME `layers`
                    # list built in the previous iteration — confirm the resulting
                    # module sharing matches the reference implementation.
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            mid_ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                            num_groups=self.num_groups,
                            resample_2d=resample_2d,
                            use_freq=self.use_freq,
                        )
                        if resblock_updown
                        else Upsample(
                            mid_ch,
                            conv_resample,
                            dims=dims,
                            out_channels=out_ch,
                            resample_2d=resample_2d
                        )
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
            mid_ch = ch

        ################
        # Out ResBlock #
        ################
        # Extra channel-preserving ResBlocks applied after the last upsample.
        self.out_res = nn.ModuleList([])
        for i in range(num_res_blocks):
            layers = [
                ResBlock(
                    ch,
                    time_embed_dim,
                    dropout,
                    out_channels=ch,
                    dims=dims,
                    use_checkpoint=use_checkpoint,
                    use_scale_shift_norm=use_scale_shift_norm,
                    num_groups=self.num_groups,
                    resample_2d=resample_2d,
                    use_freq=self.use_freq,
                )
            ]
            self.out_res.append(TimestepEmbedSequential(*layers))

        ################
        # OUTPUT block #
        ################
        # NOTE(review): the normalization uses `ch` while the conv takes
        # `model_channels` inputs — these only agree when channel_mult[0] == 1;
        # confirm for other multiplier schedules.
        self.out = nn.Sequential(
            normalization(ch, self.num_groups),
            nn.SiLU(),
            conv_nd(dims, model_channels, out_channels, 3, padding=1),
        )

    def to(self, *args, **kwargs):
        """
        we overwrite the to() method for the case where we
        distribute parts of our model to different devices
        """
        if isinstance(args[0], (list, tuple)) and len(args[0]) > 1:
            assert not kwargs and len(args) == 1
            # distribute to multiple devices
            self.devices = args[0]
            # move first half to first device, second half to second device
            self.input_blocks.to(self.devices[0])
            self.time_embed.to(self.devices[0])
            self.middle_block.to(self.devices[0])  # maybe devices 0
            for k, b in enumerate(self.output_blocks):
                if k < self.decoder_device_thresh:
                    b.to(self.devices[0])
                else:  # after threshold
                    b.to(self.devices[1])
            self.out.to(self.devices[0])
            print(f"distributed UNet components to devices {self.devices}")

        else:  # default behaviour
            super().to(*args, **kwargs)
            if self.devices is None:  # if self.devices has not been set yet, read it from params
                p = next(self.parameters())
                self.devices = [p.device, p.device]

    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x C x ...] Tensor of outputs.
        """
        hs = []  # Save skip-connections here
        input_pyramid = x
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))  # Gen sinusoidal timestep embedding
        h = x
        self.hs_shapes = []

        for module in self.input_blocks:
            if not isinstance(module[0], WaveletDownsample):
                h = module(h, emb)  # Run a downstream module
                skip = None
                if isinstance(h, tuple):  # Check for skip features (tuple of high frequency subbands) and store in hs
                    h, skip = h
                hs.append(skip)
                self.hs_shapes.append(h.shape)
            else:
                # Wavelet input pyramid: downsample the raw input and add it
                # to the features as an input residual.
                input_pyramid = module(input_pyramid, emb)
                input_pyramid = input_pyramid + h
                h = input_pyramid

        for module in self.middle_block:
            h = module(h, emb)
            if isinstance(h, tuple):
                h, skip = h

        for module in self.output_blocks:
            new_hs = hs.pop()
            # A non-empty subband tuple replaces the current wavelet skip.
            if new_hs:
                skip = new_hs

            # Use additive skip connections
            if self.additive_skips:
                h = (h + new_hs) / np.sqrt(2)

            # Use frequency aware skip connections
            elif self.use_freq:  # You usually want to use the frequency aware upsampling
                if isinstance(h, tuple):  # Replace None with the stored skip features
                    l = list(h)
                    l[1] = skip
                    h = tuple(l)
                else:
                    h = (h, skip)

            # Use concatenation
            else:
                h = th.cat([h, new_hs], dim=1)

            h = module(h, emb)  # Run an upstream module

        for module in self.out_res:
            h = module(h, emb)

        # ResBlocks return (tensor, subbands) tuples; drop the subband slot.
        h, _ = h
        return self.out(h)
|
run.sh
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# general settings
GPU=0;                  # gpu to use
SEED=42;                # randomness seed for sampling
CHANNELS=64;            # number of model base channels (we use 64 for all experiments)
MODE='train';           # train vs sample
DATASET='brats';        # brats, lidc-idri or inpaint
IN_CHANNELS=8;
MODEL='ours_unet_128';  # 'ours_unet_256', 'ours_wnet_128', 'ours_wnet_256'

# settings for sampling/inference
ITERATIONS=0;           # training iteration (as a multiple of 1k) checkpoint to use for sampling
SAMPLING_STEPS=0;       # number of steps for accelerated sampling, 0 for the default 1000
RUN_DIR="";             # tensorboard dir to be set for the evaluation
# Fix: DATA_MODE is passed to the sampling script below but was never defined,
# so it expanded to an empty string and overrode the script default.
DATA_MODE='validation'; # data split used by the sampling script

# detailed settings (no need to change for reproducing)
if [[ $MODEL == 'ours_unet_128' ]]; then
  echo "MODEL: WDM (U-Net) 128 x 128 x 128";
  CHANNEL_MULT=1,2,2,4,4;
  IMAGE_SIZE=128;
  ADDITIVE_SKIP=True;
  USE_FREQ=False;
  BATCH_SIZE=10;
elif [[ $MODEL == 'ours_unet_256' ]]; then
  echo "MODEL: WDM (U-Net) 256 x 256 x 256";
  CHANNEL_MULT=1,2,2,4,4,4;
  IMAGE_SIZE=256;
  ADDITIVE_SKIP=True;
  USE_FREQ=False;
  BATCH_SIZE=1;
elif [[ $MODEL == 'ours_wnet_128' ]]; then
  echo "MODEL: WDM (WavU-Net) 128 x 128 x 128";
  CHANNEL_MULT=1,2,2,4,4;
  IMAGE_SIZE=128;
  ADDITIVE_SKIP=False;
  USE_FREQ=True;
  BATCH_SIZE=10;
elif [[ $MODEL == 'ours_wnet_256' ]]; then
  echo "MODEL: WDM (WavU-Net) 256 x 256 x 256";
  CHANNEL_MULT=1,2,2,4,4,4;
  IMAGE_SIZE=256;
  ADDITIVE_SKIP=False;
  USE_FREQ=True;
  BATCH_SIZE=1;
else
  echo "MODEL TYPE NOT FOUND -> Check the supported configurations again";
fi

# some information and overwriting batch size for sampling
# (overwrite in case you want to sample with a higher batch size)
# no need to change for reproducing
if [[ $MODE == 'sample' ]]; then
  echo "MODE: sample"
  BATCH_SIZE=1;
elif [[ $MODE == 'train' ]]; then
  if [[ $DATASET == 'brats' ]]; then
    echo "MODE: training";
    echo "DATASET: BRATS";
    DATA_DIR=~/wdm-3d/data/BRATS/;
  elif [[ $DATASET == 'lidc-idri' ]]; then
    echo "MODE: training";
    echo "Dataset: LIDC-IDRI";
    DATA_DIR=~/wdm-3d/data/LIDC-IDRI/;
    IN_CHANNELS=8;
  elif [[ $DATASET == 'inpaint' ]]; then
    echo "MODE: training";
    echo "DATASET: INPAINT";
    DATA_DIR=~/wdm-3d/data/INPAINT/;
    IN_CHANNELS=16;
  else
    echo "DATASET NOT FOUND -> Check the supported datasets again";
  fi
fi

COMMON="
--dataset=${DATASET}
--num_channels=${CHANNELS}
--class_cond=False
--num_res_blocks=2
--num_heads=1
--learn_sigma=False
--use_scale_shift_norm=False
--attention_resolutions=
--channel_mult=${CHANNEL_MULT}
--diffusion_steps=1000
--noise_schedule=linear
--rescale_learned_sigmas=False
--rescale_timesteps=False
--dims=3
--batch_size=${BATCH_SIZE}
--num_groups=32
--in_channels=${IN_CHANNELS}
--out_channels=${IN_CHANNELS}
--bottleneck_attention=False
--resample_2d=False
--renormalize=True
--additive_skips=${ADDITIVE_SKIP}
--use_freq=${USE_FREQ}
--predict_xstart=True
"
TRAIN="
--data_dir=${DATA_DIR}
--resume_checkpoint=
--resume_step=0
--image_size=${IMAGE_SIZE}
--use_fp16=False
--lr=1e-5
--save_interval=100000
--num_workers=24
--devices=${GPU}
"
SAMPLE="
--data_dir=${DATA_DIR}
--data_mode=${DATA_MODE}
--seed=${SEED}
--image_size=${IMAGE_SIZE}
--use_fp16=False
--model_path=./${RUN_DIR}/checkpoints/${DATASET}_${ITERATIONS}000.pt
--devices=${GPU}
--output_dir=./results/${RUN_DIR}/${DATASET}_${MODEL}_${ITERATIONS}000/
--num_samples=1000
--use_ddim=False
--sampling_steps=${SAMPLING_STEPS}
--clip_denoised=True
"

# run the python scripts
if [[ $MODE == 'train' ]]; then
  python scripts/generation_train.py $TRAIN $COMMON;
else
  python scripts/generation_sample.py $SAMPLE $COMMON;
fi
|
scripts/generation_sample.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A script for sampling from a diffusion model for unconditional image generation.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import argparse
|
| 6 |
+
import nibabel as nib
|
| 7 |
+
import numpy as np
|
| 8 |
+
import os
|
| 9 |
+
import pathlib
|
| 10 |
+
import random
|
| 11 |
+
import sys
|
| 12 |
+
import torch as th
|
| 13 |
+
|
| 14 |
+
sys.path.append(".")
|
| 15 |
+
|
| 16 |
+
from guided_diffusion import (dist_util,
|
| 17 |
+
logger)
|
| 18 |
+
from guided_diffusion.script_util import (model_and_diffusion_defaults,
|
| 19 |
+
create_model_and_diffusion,
|
| 20 |
+
add_dict_to_argparser,
|
| 21 |
+
args_to_dict,
|
| 22 |
+
)
|
| 23 |
+
from guided_diffusion.inpaintloader import InpaintVolumes
|
| 24 |
+
from DWT_IDWT.DWT_IDWT_layer import IDWT_3D, DWT_3D
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def visualize(img):
    """Min-max normalize `img` into the range [0, 1].

    :param img: array-like (numpy array or torch tensor) with `.min()`/`.max()`.
    :return: normalized array of the same type/shape.

    Fix: a constant-valued input previously caused a 0/0 division; it now
    returns an all-zero array of the same shape instead.
    """
    _min = img.min()
    _max = img.max()
    if _max == _min:
        # Constant volume: nothing to scale, avoid division by zero.
        return img - _min
    normalized_img = (img - _min) / (_max - _min)
    return normalized_img
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def dice_score(pred, targs):
    """Return the Dice coefficient between a prediction and a target mask.

    The prediction is binarized by thresholding at 0 before comparison.
    """
    binarized = (pred > 0).float()
    intersection = (binarized * targs).sum()
    denominator = (binarized + targs).sum()
    return 2. * intersection / denominator
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def main():
    """Sample volumes from a trained diffusion model and save them as NIfTI files.

    Two modes, selected by ``args.dataset``:
    - 'inpaint': conditions sampling on a masked input volume (wavelet-domain
      context and mask passed via ``model_kwargs``).
    - otherwise: unconditional generation from pure noise in the wavelet
      domain (8 subband channels at half the spatial resolution).
    """
    args = create_argparser().parse_args()
    seed = args.seed
    dist_util.setup_dist(devices=args.devices)
    logger.configure()

    logger.log("Creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    logger.log("Load model from: {}".format(args.model_path))
    model.load_state_dict(dist_util.load_state_dict(args.model_path, map_location="cpu"))
    model.to(dist_util.dev([0, 1]) if len(args.devices) > 1 else dist_util.dev())  # allow for 2 devices

    if args.use_fp16:
        raise ValueError("fp16 currently not implemented")

    model.eval()
    # 3D Haar wavelet transforms: dwt encodes conditioning volumes, idwt
    # reconstructs the sampled subbands back to image space.
    idwt = IDWT_3D("haar")
    dwt = DWT_3D("haar")

    if args.dataset == 'inpaint':
        ds = InpaintVolumes(args.data_dir, subset='val', img_size=args.image_size)
        loader = th.utils.data.DataLoader(ds, batch_size=args.batch_size, shuffle=False)
        data_iter = iter(loader)
    else:
        loader = None

    for ind in range(args.num_samples // args.batch_size):
        # Re-seed every iteration so each sample is reproducible in isolation;
        # the seed is incremented below so samples still differ.
        th.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        # print(f"Reseeded (in for loop) to {seed}")

        seed += 1

        if args.dataset == 'inpaint':
            try:
                Y, M, Y_void, name, affine = next(data_iter)
            except StopIteration:
                # Restart the loader when more samples than validation cases
                # are requested.
                data_iter = iter(loader)
                Y, M, Y_void, name, affine = next(data_iter)
            Y = Y.to(dist_util.dev())
            M = M.to(dist_util.dev())
            Y_void = Y_void.to(dist_util.dev())
            # Wavelet domain context and mask
            ctx = th.cat(dwt(Y_void), dim=1)
            mask_dwt = th.cat(dwt(M), dim=1)
            # Broadcast the single-channel mask across the context channels.
            mask_rep = mask_dwt.repeat(1, Y.shape[1], 1, 1, 1)
            noise = th.randn_like(ctx)
            sample = diffusion.p_sample_loop(
                model,
                shape=ctx.shape,
                noise=noise,
                clip_denoised=args.clip_denoised,
                model_kwargs={'context': ctx, 'mask': mask_rep},
            )
        else:
            # Unconditional: noise over the 8 wavelet subbands at half the
            # target spatial resolution.
            img = th.randn(
                args.batch_size,
                8,
                args.image_size // 2,
                args.image_size // 2,
                args.image_size // 2,
                device=dist_util.dev(),
            )
            sample = diffusion.p_sample_loop(
                model=model,
                shape=img.shape,
                noise=img,
                clip_denoised=args.clip_denoised,
                model_kwargs={},
            )

        B, _, D, H, W = sample.size()

        # Reconstruct the image from the 8 subband channels; the low-low-low
        # subband (channel 0) is rescaled by 3 — presumably undoing a training-
        # time normalization of the LLL band (TODO confirm against training code).
        sample = idwt(sample[:, 0, :, :, :].view(B, 1, D, H, W) * 3.,
                      sample[:, 1, :, :, :].view(B, 1, D, H, W),
                      sample[:, 2, :, :, :].view(B, 1, D, H, W),
                      sample[:, 3, :, :, :].view(B, 1, D, H, W),
                      sample[:, 4, :, :, :].view(B, 1, D, H, W),
                      sample[:, 5, :, :, :].view(B, 1, D, H, W),
                      sample[:, 6, :, :, :].view(B, 1, D, H, W),
                      sample[:, 7, :, :, :].view(B, 1, D, H, W))

        # Map from [-1, 1] back to [0, 1] intensity range.
        sample = (sample + 1) / 2.

        if len(sample.shape) == 5:
            sample = sample.squeeze(dim=1)  # don't squeeze batch dimension for bs 1

        pathlib.Path(args.output_dir).mkdir(parents=True, exist_ok=True)
        for i in range(sample.shape[0]):
            if args.dataset == 'inpaint':
                # Keep the original case name and affine so the output aligns
                # with the input volume.
                output_name = os.path.join(args.output_dir, f"{name[i]}_inpaint.nii.gz")
                aff = affine[i] if isinstance(affine[i], np.ndarray) else affine[i].numpy()
            else:
                output_name = os.path.join(args.output_dir, f'sample_{ind}_{i}.nii.gz')
                aff = np.eye(4)
            img = nib.Nifti1Image(sample.detach().cpu().numpy()[i, :, :, :], aff)
            nib.save(img=img, filename=output_name)
            print(f'Saved to {output_name}')
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def create_argparser():
    """Build the CLI parser for sampling.

    Script-level defaults take precedence; model/diffusion defaults are only
    added for keys not already defined here.
    """
    defaults = dict(
        seed=0,
        data_dir="",
        data_mode='validation',
        clip_denoised=True,
        num_samples=1,
        batch_size=1,
        use_ddim=False,
        class_cond=False,
        sampling_steps=0,
        model_path="",
        devices=[0],
        output_dir='./results',
        mode='default',
        renormalize=False,
        image_size=256,
        half_res_crop=False,
        concat_coords=False,  # if true, add 3 (for 3d) or 2 (for 2d) to in_channels
    )
    for key, value in model_and_diffusion_defaults().items():
        defaults.setdefault(key, value)
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
# Script entry point: run sampling when executed directly.
if __name__ == "__main__":
    main()
|
scripts/generation_train.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A script for training a diffusion model to unconditional image generation.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import argparse
|
| 6 |
+
import numpy as np
|
| 7 |
+
import random
|
| 8 |
+
import sys
|
| 9 |
+
import torch as th
|
| 10 |
+
|
| 11 |
+
sys.path.append(".")
|
| 12 |
+
sys.path.append("..")
|
| 13 |
+
|
| 14 |
+
from guided_diffusion import (dist_util,
|
| 15 |
+
logger)
|
| 16 |
+
from guided_diffusion.bratsloader import BRATSVolumes
|
| 17 |
+
from guided_diffusion.lidcloader import LIDCVolumes
|
| 18 |
+
from guided_diffusion.inpaintloader import InpaintVolumes
|
| 19 |
+
from guided_diffusion.resample import create_named_schedule_sampler
|
| 20 |
+
from guided_diffusion.script_util import (model_and_diffusion_defaults,
|
| 21 |
+
create_model_and_diffusion,
|
| 22 |
+
args_to_dict,
|
| 23 |
+
add_dict_to_argparser)
|
| 24 |
+
from guided_diffusion.train_util import TrainLoop
|
| 25 |
+
from guided_diffusion.pretrain_checks import run_pretrain_checks
|
| 26 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def main():
    """Entry point for training: parse args, seed RNGs, set up logging,
    build the model/diffusion pair and dataset, then run the training loop."""
    args = create_argparser().parse_args()
    # Seed all RNG sources once for reproducible training runs.
    seed = args.seed
    th.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    summary_writer = None
    if args.use_tensorboard:
        logdir = None
        if args.tensorboard_path:
            # Reuse an existing logdir (e.g. when resuming a run).
            logdir = args.tensorboard_path
        summary_writer = SummaryWriter(log_dir=logdir)
        # Record the full configuration in TensorBoard for traceability.
        summary_writer.add_text(
            'config',
            '\n'.join([f'--{k}={repr(v)} <br/>' for k, v in vars(args).items()])
        )
        logger.configure(dir=summary_writer.get_logdir())
    else:
        logger.configure()

    dist_util.setup_dist(devices=args.devices)

    logger.log("Creating model and diffusion...")
    arguments = args_to_dict(args, model_and_diffusion_defaults().keys())
    model, diffusion = create_model_and_diffusion(**arguments)

    # logger.log("Number of trainable parameters: {}".format(np.array([np.array(p.shape).prod() for p in model.parameters()]).sum()))
    model.to(dist_util.dev([0, 1]) if len(args.devices) > 1 else dist_util.dev())  # allow for 2 devices
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion, maxt=1000)

    # Dataset selection; only 'inpaint' provides a validation loader.
    if args.dataset == 'brats':
        assert args.image_size in [128, 256], "We currently just support image sizes: 128, 256"
        ds = BRATSVolumes(args.data_dir, test_flag=False,
                          normalize=(lambda x: 2*x - 1) if args.renormalize else None,
                          mode='train',
                          img_size=args.image_size)
        val_loader = None

    elif args.dataset == 'lidc-idri':
        assert args.image_size in [128, 256], "We currently just support image sizes: 128, 256"
        ds = LIDCVolumes(
            args.data_dir,
            test_flag=False,
            normalize=(lambda x: 2 * x - 1) if args.renormalize else None,
            mode='train',
            img_size=args.image_size,
        )
        val_loader = None

    elif args.dataset == 'inpaint':
        ds = InpaintVolumes(
            args.data_dir,
            subset='train',
            img_size=args.image_size,
            normalize=(lambda x: 2 * x - 1) if args.renormalize else None,
        )
        val_ds = InpaintVolumes(
            args.data_dir,
            subset='val',
            img_size=args.image_size,
            normalize=(lambda x: 2 * x - 1) if args.renormalize else None,
        )
        val_loader = th.utils.data.DataLoader(
            val_ds,
            batch_size=args.batch_size,
            num_workers=args.num_workers,
            shuffle=False,
        )
    else:
        # NOTE(review): `ds` is left undefined here, so the DataLoader below
        # will raise NameError for unsupported datasets.
        print("We currently just support the datasets: brats, lidc-idri, inpaint")
        val_loader = None

    datal = th.utils.data.DataLoader(ds,
                                     batch_size=args.batch_size,
                                     num_workers=args.num_workers,
                                     shuffle=True,
                                     )

    if args.run_tests:
        # Dry-run sanity checks instead of full training.
        logger.log("Running pre-training checks...")
        run_pretrain_checks(args, datal, model, diffusion, schedule_sampler)
        return

    logger.log("Start training...")
    TrainLoop(
        model=model,
        diffusion=diffusion,
        data=datal,
        batch_size=args.batch_size,
        in_channels=args.in_channels,
        image_size=args.image_size,
        microbatch=args.microbatch,
        lr=args.lr,
        ema_rate=args.ema_rate,
        log_interval=args.log_interval,
        save_interval=args.save_interval,
        resume_checkpoint=args.resume_checkpoint,
        resume_step=args.resume_step,
        use_fp16=args.use_fp16,
        fp16_scale_growth=args.fp16_scale_growth,
        schedule_sampler=schedule_sampler,
        weight_decay=args.weight_decay,
        lr_anneal_steps=args.lr_anneal_steps,
        dataset=args.dataset,
        summary_writer=summary_writer,
        mode='inpaint' if args.dataset == 'inpaint' else 'default',
        val_data=val_loader,
        val_interval=args.val_interval,
    ).run_loop()
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def create_argparser():
    """Build the CLI parser for training.

    Fix: previously ``defaults.update(model_and_diffusion_defaults())``
    unconditionally overwrote the script-level defaults above (e.g. ``lr``,
    ``in_channels``, ``dims``) with the library defaults. Now, matching
    scripts/generation_sample.py, library defaults are only added for keys
    not already defined here.
    """
    defaults = dict(
        seed=0,
        data_dir="",
        schedule_sampler="uniform",
        lr=1e-4,
        weight_decay=0.0,
        lr_anneal_steps=0,
        batch_size=1,
        microbatch=-1,
        ema_rate="0.9999",
        log_interval=100,
        save_interval=5000,
        resume_checkpoint='',
        resume_step=0,
        use_fp16=False,
        fp16_scale_growth=1e-3,
        dataset='brats',
        use_tensorboard=True,
        tensorboard_path='',  # set path to existing logdir for resuming
        devices=[0],
        dims=3,
        learn_sigma=False,
        num_groups=32,
        channel_mult="1,2,2,4,4",
        in_channels=8,
        out_channels=8,
        bottleneck_attention=False,
        num_workers=0,
        mode='default',
        renormalize=True,
        additive_skips=False,
        use_freq=False,
        val_interval=1000,
        run_tests=False,
    )
    defaults.update({k: v for k, v in model_and_diffusion_defaults().items() if k not in defaults})
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
# Script entry point: run training when executed directly.
if __name__ == "__main__":
    main()
|
utils/preproc_lidc-idri.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Script for preprocessing the LIDC-IDRI dataset.
|
| 3 |
+
"""
|
| 4 |
+
import argparse
|
| 5 |
+
import os
|
| 6 |
+
import shutil
|
| 7 |
+
import dicom2nifti
|
| 8 |
+
import nibabel as nib
|
| 9 |
+
import numpy as np
|
| 10 |
+
from scipy.ndimage import zoom
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def preprocess_nifti(input_path, output_path):
    """Preprocess one NIfTI volume for LIDC-IDRI training.

    Pipeline: resample to 1mm isotropic spacing, center-crop to 256^3, clip
    HU values below -1000 and above the 0.999 quantile, min-max normalize to
    [0, 1], and save with an identity affine.

    :param input_path: path of the NIfTI file to read.
    :param output_path: path the processed NIfTI file is written to.

    Fixes: the assertion message wrongly claimed (320, 320, 320) while
    checking for 256^3; crop start indices are now clamped at 0 so volumes
    smaller than 256 in any axis no longer wrap around via negative slicing.
    """
    # Load the Nifti image
    print('Process image: {}'.format(input_path))
    img = nib.load(input_path)

    # Get the current voxel sizes
    voxel_sizes = img.header.get_zooms()

    # Calculate the target voxel size (1mm x 1mm x 1mm)
    target_voxel_size = (1.0, 1.0, 1.0)

    # Calculate the resampling factor: current spacing / target spacing per axis
    zoom_factors = [current / target for target, current in zip(target_voxel_size, voxel_sizes)]

    # Resample the image (cubic interpolation, edge values extended)
    print("[1] Resample the image ...")
    resampled_data = zoom(img.get_fdata(), zoom_factors, order=3, mode='nearest')

    print("[2] Center crop the image ...")
    crop_size = (256, 256, 256)
    depth, height, width = resampled_data.shape

    # Clamp at 0 so axes smaller than the crop size don't produce negative
    # (wrap-around) start indices.
    d_start = max((depth - crop_size[0]) // 2, 0)
    h_start = max((height - crop_size[1]) // 2, 0)
    w_start = max((width - crop_size[2]) // 2, 0)
    cropped_arr = resampled_data[d_start:d_start + crop_size[0],
                                 h_start:h_start + crop_size[1],
                                 w_start:w_start + crop_size[2]]

    print("[3] Clip all values below -1000 ...")
    cropped_arr[cropped_arr < -1000] = -1000

    print("[4] Clip the upper quantile (0.999) to remove outliers ...")
    out_clipped = np.clip(cropped_arr, -1000, np.quantile(cropped_arr, 0.999))

    print("[5] Normalize the image ...")
    out_normalized = (out_clipped - np.min(out_clipped)) / (np.max(out_clipped) - np.min(out_clipped))

    assert out_normalized.shape == (256, 256, 256), "The output shape should be (256, 256, 256)"

    print("[6] FINAL REPORT: Min value: {}, Max value: {}, Shape: {}".format(out_normalized.min(),
                                                                             out_normalized.max(),
                                                                             out_normalized.shape))
    print("-------------------------------------------------------------------------------")
    # Save the resampled image (identity affine: original orientation is discarded)
    resampled_img = nib.Nifti1Image(out_normalized, np.eye(4))
    nib.save(resampled_img, output_path)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# Script entry point: convert DICOM series to NIfTI, preprocess each file,
# then optionally delete the unprocessed intermediates.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dicom_dir', type=str, required=True,
                        help='Directory containing the original dicom data')
    parser.add_argument('--nifti_dir', type=str, required=True,
                        help='Directory to store the processed nifti files')
    parser.add_argument('--delete_unprocessed', type=eval, default=True,
                        help='Set true to delete the unprocessed nifti files')
    args = parser.parse_args()

    # Convert DICOM to nifti
    # NOTE: the original DICOM folder is removed after conversion.
    for patient in os.listdir(args.dicom_dir):
        print('Convert {} to nifti'.format(patient))
        if not os.path.exists(os.path.join(args.nifti_dir, patient)):
            os.makedirs(os.path.join(args.nifti_dir, patient))
        dicom2nifti.convert_directory(os.path.join(args.dicom_dir, patient),
                                      os.path.join(args.nifti_dir, patient))
        shutil.rmtree(os.path.join(args.dicom_dir, patient))

    # Preprocess nifti files
    for root, dirs, files in os.walk(args.nifti_dir):
        for file in files:
            try:
                preprocess_nifti(os.path.join(root, file), os.path.join(root, 'processed.nii.gz'))
            # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit
            # and hid the actual failure reason.
            except Exception as exc:
                print("Error occurred for file: {}".format(file))
                print("Reason: {}".format(exc))

    # Delete unprocessed nifti files
    if args.delete_unprocessed:
        for root, dirs, files in os.walk(args.nifti_dir):
            for file in files:
                if not file == 'processed.nii.gz':
                    os.remove(os.path.join(root, file))
|