diff --git a/wdm-3d-initial/.gitignore b/wdm-3d-initial/.gitignore
deleted file mode 100644
index 8bc17019746344d74ac7d68eb8a88b0e77e4e992..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/.gitignore
+++ /dev/null
@@ -1,167 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# Defined folders
-./data/
-./results/
-./runs/
-
-*.npy
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-
-# pdm
-# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-# in version control.
-# https://pdm.fming.dev/#use-with-ide
-.pdm.toml
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-.idea/
\ No newline at end of file
diff --git a/wdm-3d-initial/DWT_IDWT/DWT_IDWT_Functions.py b/wdm-3d-initial/DWT_IDWT/DWT_IDWT_Functions.py
deleted file mode 100644
index 20e5192a48d05b237b0a482992429665abf2625b..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/DWT_IDWT/DWT_IDWT_Functions.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# Copyright (c) 2019, Adobe Inc. All rights reserved.
-#
-# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike
-# 4.0 International Public License. To view a copy of this license, visit
-# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode.
-
-"""
-自定义pytorch函数,实现一维、二维、三维张量的DWT和IDWT,未考虑边界延拓
-只有当图像行列数都是偶数,且重构滤波器组低频分量长度为2时,才能精确重构,否则在边界处有误差。
-"""
-import torch
-from torch.autograd import Function
-
-
-class DWTFunction_1D(Function):
- @staticmethod
- def forward(ctx, input, matrix_Low, matrix_High):
- ctx.save_for_backward(matrix_Low, matrix_High)
- L = torch.matmul(input, matrix_Low.t())
- H = torch.matmul(input, matrix_High.t())
- return L, H
-
- @staticmethod
- def backward(ctx, grad_L, grad_H):
- matrix_L, matrix_H = ctx.saved_variables
- grad_input = torch.add(torch.matmul(
- grad_L, matrix_L), torch.matmul(grad_H, matrix_H))
- return grad_input, None, None
-
-
-class IDWTFunction_1D(Function):
- @staticmethod
- def forward(ctx, input_L, input_H, matrix_L, matrix_H):
- ctx.save_for_backward(matrix_L, matrix_H)
- output = torch.add(torch.matmul(input_L, matrix_L),
- torch.matmul(input_H, matrix_H))
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- matrix_L, matrix_H = ctx.saved_variables
- grad_L = torch.matmul(grad_output, matrix_L.t())
- grad_H = torch.matmul(grad_output, matrix_H.t())
- return grad_L, grad_H, None, None
-
-
-class DWTFunction_2D(Function):
- @staticmethod
- def forward(ctx, input, matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
- ctx.save_for_backward(matrix_Low_0, matrix_Low_1,
- matrix_High_0, matrix_High_1)
- L = torch.matmul(matrix_Low_0, input)
- H = torch.matmul(matrix_High_0, input)
- LL = torch.matmul(L, matrix_Low_1)
- LH = torch.matmul(L, matrix_High_1)
- HL = torch.matmul(H, matrix_Low_1)
- HH = torch.matmul(H, matrix_High_1)
- return LL, LH, HL, HH
-
- @staticmethod
- def backward(ctx, grad_LL, grad_LH, grad_HL, grad_HH):
- matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_variables
- grad_L = torch.add(torch.matmul(grad_LL, matrix_Low_1.t()),
- torch.matmul(grad_LH, matrix_High_1.t()))
- grad_H = torch.add(torch.matmul(grad_HL, matrix_Low_1.t()),
- torch.matmul(grad_HH, matrix_High_1.t()))
- grad_input = torch.add(torch.matmul(
- matrix_Low_0.t(), grad_L), torch.matmul(matrix_High_0.t(), grad_H))
- return grad_input, None, None, None, None
-
-
-class DWTFunction_2D_tiny(Function):
- @staticmethod
- def forward(ctx, input, matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
- ctx.save_for_backward(matrix_Low_0, matrix_Low_1,
- matrix_High_0, matrix_High_1)
- L = torch.matmul(matrix_Low_0, input)
- LL = torch.matmul(L, matrix_Low_1)
- return LL
-
- @staticmethod
- def backward(ctx, grad_LL):
- matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_variables
- grad_L = torch.matmul(grad_LL, matrix_Low_1.t())
- grad_input = torch.matmul(matrix_Low_0.t(), grad_L)
- return grad_input, None, None, None, None
-
-
-class IDWTFunction_2D(Function):
- @staticmethod
- def forward(ctx, input_LL, input_LH, input_HL, input_HH,
- matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1):
- ctx.save_for_backward(matrix_Low_0, matrix_Low_1,
- matrix_High_0, matrix_High_1)
- L = torch.add(torch.matmul(input_LL, matrix_Low_1.t()),
- torch.matmul(input_LH, matrix_High_1.t()))
- H = torch.add(torch.matmul(input_HL, matrix_Low_1.t()),
- torch.matmul(input_HH, matrix_High_1.t()))
- output = torch.add(torch.matmul(matrix_Low_0.t(), L),
- torch.matmul(matrix_High_0.t(), H))
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- matrix_Low_0, matrix_Low_1, matrix_High_0, matrix_High_1 = ctx.saved_variables
- grad_L = torch.matmul(matrix_Low_0, grad_output)
- grad_H = torch.matmul(matrix_High_0, grad_output)
- grad_LL = torch.matmul(grad_L, matrix_Low_1)
- grad_LH = torch.matmul(grad_L, matrix_High_1)
- grad_HL = torch.matmul(grad_H, matrix_Low_1)
- grad_HH = torch.matmul(grad_H, matrix_High_1)
- return grad_LL, grad_LH, grad_HL, grad_HH, None, None, None, None
-
-
-class DWTFunction_3D(Function):
- @staticmethod
- def forward(ctx, input,
- matrix_Low_0, matrix_Low_1, matrix_Low_2,
- matrix_High_0, matrix_High_1, matrix_High_2):
- ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_Low_2,
- matrix_High_0, matrix_High_1, matrix_High_2)
- L = torch.matmul(matrix_Low_0, input)
- H = torch.matmul(matrix_High_0, input)
- LL = torch.matmul(L, matrix_Low_1).transpose(dim0=2, dim1=3)
- LH = torch.matmul(L, matrix_High_1).transpose(dim0=2, dim1=3)
- HL = torch.matmul(H, matrix_Low_1).transpose(dim0=2, dim1=3)
- HH = torch.matmul(H, matrix_High_1).transpose(dim0=2, dim1=3)
- LLL = torch.matmul(matrix_Low_2, LL).transpose(dim0=2, dim1=3)
- LLH = torch.matmul(matrix_Low_2, LH).transpose(dim0=2, dim1=3)
- LHL = torch.matmul(matrix_Low_2, HL).transpose(dim0=2, dim1=3)
- LHH = torch.matmul(matrix_Low_2, HH).transpose(dim0=2, dim1=3)
- HLL = torch.matmul(matrix_High_2, LL).transpose(dim0=2, dim1=3)
- HLH = torch.matmul(matrix_High_2, LH).transpose(dim0=2, dim1=3)
- HHL = torch.matmul(matrix_High_2, HL).transpose(dim0=2, dim1=3)
- HHH = torch.matmul(matrix_High_2, HH).transpose(dim0=2, dim1=3)
- return LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH
-
- @staticmethod
- def backward(ctx, grad_LLL, grad_LLH, grad_LHL, grad_LHH,
- grad_HLL, grad_HLH, grad_HHL, grad_HHH):
- matrix_Low_0, matrix_Low_1, matrix_Low_2, matrix_High_0, matrix_High_1, matrix_High_2 = ctx.saved_variables
- grad_LL = torch.add(torch.matmul(matrix_Low_2.t(), grad_LLL.transpose(dim0=2, dim1=3)), torch.matmul(
- matrix_High_2.t(), grad_HLL.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
- grad_LH = torch.add(torch.matmul(matrix_Low_2.t(), grad_LLH.transpose(dim0=2, dim1=3)), torch.matmul(
- matrix_High_2.t(), grad_HLH.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
- grad_HL = torch.add(torch.matmul(matrix_Low_2.t(), grad_LHL.transpose(dim0=2, dim1=3)), torch.matmul(
- matrix_High_2.t(), grad_HHL.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
- grad_HH = torch.add(torch.matmul(matrix_Low_2.t(), grad_LHH.transpose(dim0=2, dim1=3)), torch.matmul(
- matrix_High_2.t(), grad_HHH.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
- grad_L = torch.add(torch.matmul(grad_LL, matrix_Low_1.t()),
- torch.matmul(grad_LH, matrix_High_1.t()))
- grad_H = torch.add(torch.matmul(grad_HL, matrix_Low_1.t()),
- torch.matmul(grad_HH, matrix_High_1.t()))
- grad_input = torch.add(torch.matmul(
- matrix_Low_0.t(), grad_L), torch.matmul(matrix_High_0.t(), grad_H))
- return grad_input, None, None, None, None, None, None, None, None
-
-
-class IDWTFunction_3D(Function):
- @staticmethod
- def forward(ctx, input_LLL, input_LLH, input_LHL, input_LHH,
- input_HLL, input_HLH, input_HHL, input_HHH,
- matrix_Low_0, matrix_Low_1, matrix_Low_2,
- matrix_High_0, matrix_High_1, matrix_High_2):
- ctx.save_for_backward(matrix_Low_0, matrix_Low_1, matrix_Low_2,
- matrix_High_0, matrix_High_1, matrix_High_2)
- input_LL = torch.add(torch.matmul(matrix_Low_2.t(), input_LLL.transpose(dim0=2, dim1=3)), torch.matmul(
- matrix_High_2.t(), input_HLL.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
- input_LH = torch.add(torch.matmul(matrix_Low_2.t(), input_LLH.transpose(dim0=2, dim1=3)), torch.matmul(
- matrix_High_2.t(), input_HLH.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
- input_HL = torch.add(torch.matmul(matrix_Low_2.t(), input_LHL.transpose(dim0=2, dim1=3)), torch.matmul(
- matrix_High_2.t(), input_HHL.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
- input_HH = torch.add(torch.matmul(matrix_Low_2.t(), input_LHH.transpose(dim0=2, dim1=3)), torch.matmul(
- matrix_High_2.t(), input_HHH.transpose(dim0=2, dim1=3))).transpose(dim0=2, dim1=3)
- input_L = torch.add(torch.matmul(input_LL, matrix_Low_1.t()),
- torch.matmul(input_LH, matrix_High_1.t()))
- input_H = torch.add(torch.matmul(input_HL, matrix_Low_1.t()),
- torch.matmul(input_HH, matrix_High_1.t()))
- output = torch.add(torch.matmul(matrix_Low_0.t(), input_L),
- torch.matmul(matrix_High_0.t(), input_H))
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- matrix_Low_0, matrix_Low_1, matrix_Low_2, matrix_High_0, matrix_High_1, matrix_High_2 = ctx.saved_variables
- grad_L = torch.matmul(matrix_Low_0, grad_output)
- grad_H = torch.matmul(matrix_High_0, grad_output)
- grad_LL = torch.matmul(grad_L, matrix_Low_1).transpose(dim0=2, dim1=3)
- grad_LH = torch.matmul(grad_L, matrix_High_1).transpose(dim0=2, dim1=3)
- grad_HL = torch.matmul(grad_H, matrix_Low_1).transpose(dim0=2, dim1=3)
- grad_HH = torch.matmul(grad_H, matrix_High_1).transpose(dim0=2, dim1=3)
- grad_LLL = torch.matmul(
- matrix_Low_2, grad_LL).transpose(dim0=2, dim1=3)
- grad_LLH = torch.matmul(
- matrix_Low_2, grad_LH).transpose(dim0=2, dim1=3)
- grad_LHL = torch.matmul(
- matrix_Low_2, grad_HL).transpose(dim0=2, dim1=3)
- grad_LHH = torch.matmul(
- matrix_Low_2, grad_HH).transpose(dim0=2, dim1=3)
- grad_HLL = torch.matmul(
- matrix_High_2, grad_LL).transpose(dim0=2, dim1=3)
- grad_HLH = torch.matmul(
- matrix_High_2, grad_LH).transpose(dim0=2, dim1=3)
- grad_HHL = torch.matmul(
- matrix_High_2, grad_HL).transpose(dim0=2, dim1=3)
- grad_HHH = torch.matmul(
- matrix_High_2, grad_HH).transpose(dim0=2, dim1=3)
- return grad_LLL, grad_LLH, grad_LHL, grad_LHH, grad_HLL, grad_HLH, grad_HHL, grad_HHH, None, None, None, None, None, None
\ No newline at end of file
diff --git a/wdm-3d-initial/DWT_IDWT/DWT_IDWT_layer.py b/wdm-3d-initial/DWT_IDWT/DWT_IDWT_layer.py
deleted file mode 100644
index 04ec023a9dd42ed618aa90e92c9e12ae5320665f..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/DWT_IDWT/DWT_IDWT_layer.py
+++ /dev/null
@@ -1,666 +0,0 @@
-"""
-自定义 pytorch 层,实现一维、二维、三维张量的 DWT 和 IDWT,未考虑边界延拓
-只有当图像行列数都是偶数,且重构滤波器组低频分量长度为 2 时,才能精确重构,否则在边界处有误差。
-"""
-import math
-
-import numpy as np
-import pywt
-import torch
-from torch.nn import Module
-
-from .DWT_IDWT_Functions import DWTFunction_1D, IDWTFunction_1D, \
- DWTFunction_2D_tiny, DWTFunction_2D, IDWTFunction_2D, \
- DWTFunction_3D, IDWTFunction_3D
-
-
-__all__ = ['DWT_1D', 'IDWT_1D', 'DWT_2D',
- 'IDWT_2D', 'DWT_3D', 'IDWT_3D', 'DWT_2D_tiny']
-
-
-class DWT_1D(Module):
- """
- input: the 1D data to be decomposed -- (N, C, Length)
- output: lfc -- (N, C, Length/2)
- hfc -- (N, C, Length/2)
- """
-
- def __init__(self, wavename):
- """
- 1D discrete wavelet transform (DWT) for sequence decomposition
- 用于序列分解的一维离散小波变换 DWT
- :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
- """
- super(DWT_1D, self).__init__()
- wavelet = pywt.Wavelet(wavename)
- self.band_low = wavelet.rec_lo
- self.band_high = wavelet.rec_hi
- assert len(self.band_low) == len(self.band_high)
- self.band_length = len(self.band_low)
- assert self.band_length % 2 == 0
- self.band_length_half = math.floor(self.band_length / 2)
-
- def get_matrix(self):
- """
- 生成变换矩阵
- generating the matrices: \mathcal{L}, \mathcal{H}
- :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
- """
- L1 = self.input_height
- L = math.floor(L1 / 2)
- matrix_h = np.zeros((L, L1 + self.band_length - 2))
- matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
- end = None if self.band_length_half == 1 else (
- - self.band_length_half + 1)
- index = 0
- for i in range(L):
- for j in range(self.band_length):
- matrix_h[i, index + j] = self.band_low[j]
- index += 2
- index = 0
- for i in range(L1 - L):
- for j in range(self.band_length):
- matrix_g[i, index + j] = self.band_high[j]
- index += 2
- matrix_h = matrix_h[:, (self.band_length_half - 1):end]
- matrix_g = matrix_g[:, (self.band_length_half - 1):end]
- if torch.cuda.is_available():
- self.matrix_low = torch.Tensor(matrix_h).cuda()
- self.matrix_high = torch.Tensor(matrix_g).cuda()
- else:
- self.matrix_low = torch.Tensor(matrix_h)
- self.matrix_high = torch.Tensor(matrix_g)
-
- def forward(self, input):
- """
- input_low_frequency_component = \mathcal{L} * input
- input_high_frequency_component = \mathcal{H} * input
- :param input: the data to be decomposed
- :return: the low-frequency and high-frequency components of the input data
- """
- assert len(input.size()) == 3
- self.input_height = input.size()[-1]
- self.get_matrix()
- return DWTFunction_1D.apply(input, self.matrix_low, self.matrix_high)
-
-
-class IDWT_1D(Module):
- """
- input: lfc -- (N, C, Length/2)
- hfc -- (N, C, Length/2)
- output: the original data -- (N, C, Length)
- """
-
- def __init__(self, wavename):
- """
- 1D inverse DWT (IDWT) for sequence reconstruction
- 用于序列重构的一维离散小波逆变换 IDWT
- :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
- """
- super(IDWT_1D, self).__init__()
- wavelet = pywt.Wavelet(wavename)
- self.band_low = wavelet.dec_lo
- self.band_high = wavelet.dec_hi
- self.band_low.reverse()
- self.band_high.reverse()
- assert len(self.band_low) == len(self.band_high)
- self.band_length = len(self.band_low)
- assert self.band_length % 2 == 0
- self.band_length_half = math.floor(self.band_length / 2)
-
- def get_matrix(self):
- """
- generating the matrices: \mathcal{L}, \mathcal{H}
- 生成变换矩阵
- :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
- """
- L1 = self.input_height
- L = math.floor(L1 / 2)
- matrix_h = np.zeros((L, L1 + self.band_length - 2))
- matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
- end = None if self.band_length_half == 1 else (
- - self.band_length_half + 1)
- index = 0
- for i in range(L):
- for j in range(self.band_length):
- matrix_h[i, index + j] = self.band_low[j]
- index += 2
- index = 0
- for i in range(L1 - L):
- for j in range(self.band_length):
- matrix_g[i, index + j] = self.band_high[j]
- index += 2
- matrix_h = matrix_h[:, (self.band_length_half - 1):end]
- matrix_g = matrix_g[:, (self.band_length_half - 1):end]
- if torch.cuda.is_available():
- self.matrix_low = torch.Tensor(matrix_h).cuda()
- self.matrix_high = torch.Tensor(matrix_g).cuda()
- else:
- self.matrix_low = torch.Tensor(matrix_h)
- self.matrix_high = torch.Tensor(matrix_g)
-
- def forward(self, L, H):
- """
- :param L: the low-frequency component of the original data
- :param H: the high-frequency component of the original data
- :return: the original data
- """
- assert len(L.size()) == len(H.size()) == 3
- self.input_height = L.size()[-1] + H.size()[-1]
- self.get_matrix()
- return IDWTFunction_1D.apply(L, H, self.matrix_low, self.matrix_high)
-
-
-class DWT_2D_tiny(Module):
- """
- input: the 2D data to be decomposed -- (N, C, H, W)
- output -- lfc: (N, C, H/2, W/2)
- #hfc_lh: (N, C, H/2, W/2)
- #hfc_hl: (N, C, H/2, W/2)
- #hfc_hh: (N, C, H/2, W/2)
- DWT_2D_tiny only outputs the low-frequency component, which is used in WaveCNet;
- the all four components could be get using DWT_2D, which is used in WaveUNet.
- """
-
- def __init__(self, wavename):
- """
- 2D discrete wavelet transform (DWT) for 2D image decomposition
- :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
- """
- super(DWT_2D_tiny, self).__init__()
- wavelet = pywt.Wavelet(wavename)
- self.band_low = wavelet.rec_lo
- self.band_high = wavelet.rec_hi
- assert len(self.band_low) == len(self.band_high)
- self.band_length = len(self.band_low)
- assert self.band_length % 2 == 0
- self.band_length_half = math.floor(self.band_length / 2)
-
- def get_matrix(self):
- """
- 生成变换矩阵
- generating the matrices: \mathcal{L}, \mathcal{H}
- :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
- """
- L1 = np.max((self.input_height, self.input_width))
- L = math.floor(L1 / 2)
- matrix_h = np.zeros((L, L1 + self.band_length - 2))
- matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
- end = None if self.band_length_half == 1 else (
- - self.band_length_half + 1)
-
- index = 0
- for i in range(L):
- for j in range(self.band_length):
- matrix_h[i, index + j] = self.band_low[j]
- index += 2
- matrix_h_0 = matrix_h[0:(math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_h_1 = matrix_h[0:(math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
-
- index = 0
- for i in range(L1 - L):
- for j in range(self.band_length):
- matrix_g[i, index + j] = self.band_high[j]
- index += 2
- matrix_g_0 = matrix_g[0:(self.input_height - math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_g_1 = matrix_g[0:(self.input_width - math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
-
- matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
- matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
- matrix_h_1 = np.transpose(matrix_h_1)
- matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
- matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
- matrix_g_1 = np.transpose(matrix_g_1)
-
- if torch.cuda.is_available():
- self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
- self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
- self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
- self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
- else:
- self.matrix_low_0 = torch.Tensor(matrix_h_0)
- self.matrix_low_1 = torch.Tensor(matrix_h_1)
- self.matrix_high_0 = torch.Tensor(matrix_g_0)
- self.matrix_high_1 = torch.Tensor(matrix_g_1)
-
- def forward(self, input):
- """
- input_lfc = \mathcal{L} * input * \mathcal{L}^T
- #input_hfc_lh = \mathcal{H} * input * \mathcal{L}^T
- #input_hfc_hl = \mathcal{L} * input * \mathcal{H}^T
- #input_hfc_hh = \mathcal{H} * input * \mathcal{H}^T
- :param input: the 2D data to be decomposed
- :return: the low-frequency component of the input 2D data
- """
- assert len(input.size()) == 4
- self.input_height = input.size()[-2]
- self.input_width = input.size()[-1]
- self.get_matrix()
- return DWTFunction_2D_tiny.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0, self.matrix_high_1)
-
-
-class DWT_2D(Module):
- """
- input: the 2D data to be decomposed -- (N, C, H, W)
- output -- lfc: (N, C, H/2, W/2)
- hfc_lh: (N, C, H/2, W/2)
- hfc_hl: (N, C, H/2, W/2)
- hfc_hh: (N, C, H/2, W/2)
- """
-
- def __init__(self, wavename):
- """
- 2D discrete wavelet transform (DWT) for 2D image decomposition
- :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
- """
- super(DWT_2D, self).__init__()
- wavelet = pywt.Wavelet(wavename)
- self.band_low = wavelet.rec_lo
- self.band_high = wavelet.rec_hi
- assert len(self.band_low) == len(self.band_high)
- self.band_length = len(self.band_low)
- assert self.band_length % 2 == 0
- self.band_length_half = math.floor(self.band_length / 2)
-
- def get_matrix(self):
- """
- 生成变换矩阵
- generating the matrices: \mathcal{L}, \mathcal{H}
- :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
- """
- L1 = np.max((self.input_height, self.input_width))
- L = math.floor(L1 / 2)
- matrix_h = np.zeros((L, L1 + self.band_length - 2))
- matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
- end = None if self.band_length_half == 1 else (
- - self.band_length_half + 1)
-
- index = 0
- for i in range(L):
- for j in range(self.band_length):
- matrix_h[i, index + j] = self.band_low[j]
- index += 2
- matrix_h_0 = matrix_h[0:(math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_h_1 = matrix_h[0:(math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
-
- index = 0
- for i in range(L1 - L):
- for j in range(self.band_length):
- matrix_g[i, index + j] = self.band_high[j]
- index += 2
- matrix_g_0 = matrix_g[0:(self.input_height - math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_g_1 = matrix_g[0:(self.input_width - math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
-
- matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
- matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
- matrix_h_1 = np.transpose(matrix_h_1)
- matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
- matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
- matrix_g_1 = np.transpose(matrix_g_1)
-
- if torch.cuda.is_available():
- self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
- self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
- self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
- self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
- else:
- self.matrix_low_0 = torch.Tensor(matrix_h_0)
- self.matrix_low_1 = torch.Tensor(matrix_h_1)
- self.matrix_high_0 = torch.Tensor(matrix_g_0)
- self.matrix_high_1 = torch.Tensor(matrix_g_1)
-
- def forward(self, input):
- """
- input_lfc = \mathcal{L} * input * \mathcal{L}^T
- input_hfc_lh = \mathcal{H} * input * \mathcal{L}^T
- input_hfc_hl = \mathcal{L} * input * \mathcal{H}^T
- input_hfc_hh = \mathcal{H} * input * \mathcal{H}^T
- :param input: the 2D data to be decomposed
- :return: the low-frequency and high-frequency components of the input 2D data
- """
- assert len(input.size()) == 4
- self.input_height = input.size()[-2]
- self.input_width = input.size()[-1]
- self.get_matrix()
- return DWTFunction_2D.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0, self.matrix_high_1)
-
-
-class IDWT_2D(Module):
- """
- input: lfc -- (N, C, H/2, W/2)
- hfc_lh -- (N, C, H/2, W/2)
- hfc_hl -- (N, C, H/2, W/2)
- hfc_hh -- (N, C, H/2, W/2)
- output: the original 2D data -- (N, C, H, W)
- """
-
- def __init__(self, wavename):
- """
- 2D inverse DWT (IDWT) for 2D image reconstruction
- :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
- """
- super(IDWT_2D, self).__init__()
- wavelet = pywt.Wavelet(wavename)
- self.band_low = wavelet.dec_lo
- self.band_low.reverse()
- self.band_high = wavelet.dec_hi
- self.band_high.reverse()
- assert len(self.band_low) == len(self.band_high)
- self.band_length = len(self.band_low)
- assert self.band_length % 2 == 0
- self.band_length_half = math.floor(self.band_length / 2)
-
- def get_matrix(self):
- """
- 生成变换矩阵
- generating the matrices: \mathcal{L}, \mathcal{H}
- :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
- """
- L1 = np.max((self.input_height, self.input_width))
- L = math.floor(L1 / 2)
- matrix_h = np.zeros((L, L1 + self.band_length - 2))
- matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
- end = None if self.band_length_half == 1 else (
- - self.band_length_half + 1)
-
- index = 0
- for i in range(L):
- for j in range(self.band_length):
- matrix_h[i, index + j] = self.band_low[j]
- index += 2
- matrix_h_0 = matrix_h[0:(math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_h_1 = matrix_h[0:(math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
-
- index = 0
- for i in range(L1 - L):
- for j in range(self.band_length):
- matrix_g[i, index + j] = self.band_high[j]
- index += 2
- matrix_g_0 = matrix_g[0:(self.input_height - math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_g_1 = matrix_g[0:(self.input_width - math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
-
- matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
- matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
- matrix_h_1 = np.transpose(matrix_h_1)
- matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
- matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
- matrix_g_1 = np.transpose(matrix_g_1)
- if torch.cuda.is_available():
- self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
- self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
- self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
- self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
- else:
- self.matrix_low_0 = torch.Tensor(matrix_h_0)
- self.matrix_low_1 = torch.Tensor(matrix_h_1)
- self.matrix_high_0 = torch.Tensor(matrix_g_0)
- self.matrix_high_1 = torch.Tensor(matrix_g_1)
-
- def forward(self, LL, LH, HL, HH):
- """
- recontructing the original 2D data
- the original 2D data = \mathcal{L}^T * lfc * \mathcal{L}
- + \mathcal{H}^T * hfc_lh * \mathcal{L}
- + \mathcal{L}^T * hfc_hl * \mathcal{H}
- + \mathcal{H}^T * hfc_hh * \mathcal{H}
- :param LL: the low-frequency component
- :param LH: the high-frequency component, hfc_lh
- :param HL: the high-frequency component, hfc_hl
- :param HH: the high-frequency component, hfc_hh
- :return: the original 2D data
- """
- assert len(LL.size()) == len(LH.size()) == len(
- HL.size()) == len(HH.size()) == 4
- self.input_height = LL.size()[-2] + HH.size()[-2]
- self.input_width = LL.size()[-1] + HH.size()[-1]
- self.get_matrix()
- return IDWTFunction_2D.apply(LL, LH, HL, HH, self.matrix_low_0, self.matrix_low_1, self.matrix_high_0, self.matrix_high_1)
-
-
-class DWT_3D(Module):
- """
- input: the 3D data to be decomposed -- (N, C, D, H, W)
- output: lfc -- (N, C, D/2, H/2, W/2)
- hfc_llh -- (N, C, D/2, H/2, W/2)
- hfc_lhl -- (N, C, D/2, H/2, W/2)
- hfc_lhh -- (N, C, D/2, H/2, W/2)
- hfc_hll -- (N, C, D/2, H/2, W/2)
- hfc_hlh -- (N, C, D/2, H/2, W/2)
- hfc_hhl -- (N, C, D/2, H/2, W/2)
- hfc_hhh -- (N, C, D/2, H/2, W/2)
- """
-
- def __init__(self, wavename):
- """
- 3D discrete wavelet transform (DWT) for 3D data decomposition
- :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
- """
- super(DWT_3D, self).__init__()
- wavelet = pywt.Wavelet(wavename)
- self.band_low = wavelet.rec_lo
- self.band_high = wavelet.rec_hi
- assert len(self.band_low) == len(self.band_high)
- self.band_length = len(self.band_low)
- assert self.band_length % 2 == 0
- self.band_length_half = math.floor(self.band_length / 2)
-
- def get_matrix(self):
- """
- 生成变换矩阵
- generating the matrices: \mathcal{L}, \mathcal{H}
- :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
- """
- L1 = np.max((self.input_height, self.input_width))
- L = math.floor(L1 / 2)
- matrix_h = np.zeros((L, L1 + self.band_length - 2))
- matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
- end = None if self.band_length_half == 1 else (
- - self.band_length_half + 1)
-
- index = 0
- for i in range(L):
- for j in range(self.band_length):
- matrix_h[i, index + j] = self.band_low[j]
- index += 2
- matrix_h_0 = matrix_h[0:(math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_h_1 = matrix_h[0:(math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
- matrix_h_2 = matrix_h[0:(math.floor(
- self.input_depth / 2)), 0:(self.input_depth + self.band_length - 2)]
-
- index = 0
- for i in range(L1 - L):
- for j in range(self.band_length):
- matrix_g[i, index + j] = self.band_high[j]
- index += 2
- matrix_g_0 = matrix_g[0:(self.input_height - math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_g_1 = matrix_g[0:(self.input_width - math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
- matrix_g_2 = matrix_g[0:(self.input_depth - math.floor(
- self.input_depth / 2)), 0:(self.input_depth + self.band_length - 2)]
-
- matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
- matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
- matrix_h_1 = np.transpose(matrix_h_1)
- matrix_h_2 = matrix_h_2[:, (self.band_length_half - 1):end]
-
- matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
- matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
- matrix_g_1 = np.transpose(matrix_g_1)
- matrix_g_2 = matrix_g_2[:, (self.band_length_half - 1):end]
- if torch.cuda.is_available():
- self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
- self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
- self.matrix_low_2 = torch.Tensor(matrix_h_2).cuda()
- self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
- self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
- self.matrix_high_2 = torch.Tensor(matrix_g_2).cuda()
- else:
- self.matrix_low_0 = torch.Tensor(matrix_h_0)
- self.matrix_low_1 = torch.Tensor(matrix_h_1)
- self.matrix_low_2 = torch.Tensor(matrix_h_2)
- self.matrix_high_0 = torch.Tensor(matrix_g_0)
- self.matrix_high_1 = torch.Tensor(matrix_g_1)
- self.matrix_high_2 = torch.Tensor(matrix_g_2)
-
- def forward(self, input):
- """
- :param input: the 3D data to be decomposed
- :return: the eight components of the input data, one low-frequency and seven high-frequency components
- """
- assert len(input.size()) == 5
- self.input_depth = input.size()[-3]
- self.input_height = input.size()[-2]
- self.input_width = input.size()[-1]
- self.get_matrix()
- return DWTFunction_3D.apply(input, self.matrix_low_0, self.matrix_low_1, self.matrix_low_2,
- self.matrix_high_0, self.matrix_high_1, self.matrix_high_2)
-
-
-class IDWT_3D(Module):
- """
- input: lfc -- (N, C, D/2, H/2, W/2)
- hfc_llh -- (N, C, D/2, H/2, W/2)
- hfc_lhl -- (N, C, D/2, H/2, W/2)
- hfc_lhh -- (N, C, D/2, H/2, W/2)
- hfc_hll -- (N, C, D/2, H/2, W/2)
- hfc_hlh -- (N, C, D/2, H/2, W/2)
- hfc_hhl -- (N, C, D/2, H/2, W/2)
- hfc_hhh -- (N, C, D/2, H/2, W/2)
- output: the original 3D data -- (N, C, D, H, W)
- """
-
- def __init__(self, wavename):
- """
- 3D inverse DWT (IDWT) for 3D data reconstruction
- :param wavename: pywt.wavelist(); in the paper, 'chx.y' denotes 'biorx.y'.
- """
- super(IDWT_3D, self).__init__()
- wavelet = pywt.Wavelet(wavename)
- self.band_low = wavelet.dec_lo
- self.band_high = wavelet.dec_hi
- self.band_low.reverse()
- self.band_high.reverse()
- assert len(self.band_low) == len(self.band_high)
- self.band_length = len(self.band_low)
- assert self.band_length % 2 == 0
- self.band_length_half = math.floor(self.band_length / 2)
-
- def get_matrix(self):
- """
- 生成变换矩阵
- generating the matrices: \mathcal{L}, \mathcal{H}
- :return: self.matrix_low = \mathcal{L}, self.matrix_high = \mathcal{H}
- """
- L1 = np.max((self.input_height, self.input_width))
- L = math.floor(L1 / 2)
- matrix_h = np.zeros((L, L1 + self.band_length - 2))
- matrix_g = np.zeros((L1 - L, L1 + self.band_length - 2))
- end = None if self.band_length_half == 1 else (
- - self.band_length_half + 1)
-
- index = 0
- for i in range(L):
- for j in range(self.band_length):
- matrix_h[i, index + j] = self.band_low[j]
- index += 2
- matrix_h_0 = matrix_h[0:(math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_h_1 = matrix_h[0:(math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
- matrix_h_2 = matrix_h[0:(math.floor(
- self.input_depth / 2)), 0:(self.input_depth + self.band_length - 2)]
-
- index = 0
- for i in range(L1 - L):
- for j in range(self.band_length):
- matrix_g[i, index + j] = self.band_high[j]
- index += 2
- matrix_g_0 = matrix_g[0:(self.input_height - math.floor(
- self.input_height / 2)), 0:(self.input_height + self.band_length - 2)]
- matrix_g_1 = matrix_g[0:(self.input_width - math.floor(
- self.input_width / 2)), 0:(self.input_width + self.band_length - 2)]
- matrix_g_2 = matrix_g[0:(self.input_depth - math.floor(
- self.input_depth / 2)), 0:(self.input_depth + self.band_length - 2)]
-
- matrix_h_0 = matrix_h_0[:, (self.band_length_half - 1):end]
- matrix_h_1 = matrix_h_1[:, (self.band_length_half - 1):end]
- matrix_h_1 = np.transpose(matrix_h_1)
- matrix_h_2 = matrix_h_2[:, (self.band_length_half - 1):end]
-
- matrix_g_0 = matrix_g_0[:, (self.band_length_half - 1):end]
- matrix_g_1 = matrix_g_1[:, (self.band_length_half - 1):end]
- matrix_g_1 = np.transpose(matrix_g_1)
- matrix_g_2 = matrix_g_2[:, (self.band_length_half - 1):end]
- if torch.cuda.is_available():
- self.matrix_low_0 = torch.Tensor(matrix_h_0).cuda()
- self.matrix_low_1 = torch.Tensor(matrix_h_1).cuda()
- self.matrix_low_2 = torch.Tensor(matrix_h_2).cuda()
- self.matrix_high_0 = torch.Tensor(matrix_g_0).cuda()
- self.matrix_high_1 = torch.Tensor(matrix_g_1).cuda()
- self.matrix_high_2 = torch.Tensor(matrix_g_2).cuda()
- else:
- self.matrix_low_0 = torch.Tensor(matrix_h_0)
- self.matrix_low_1 = torch.Tensor(matrix_h_1)
- self.matrix_low_2 = torch.Tensor(matrix_h_2)
- self.matrix_high_0 = torch.Tensor(matrix_g_0)
- self.matrix_high_1 = torch.Tensor(matrix_g_1)
- self.matrix_high_2 = torch.Tensor(matrix_g_2)
-
- def forward(self, LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH):
- """
- :param LLL: the low-frequency component, lfc
- :param LLH: the high-frequency componetn, hfc_llh
- :param LHL: the high-frequency componetn, hfc_lhl
- :param LHH: the high-frequency componetn, hfc_lhh
- :param HLL: the high-frequency componetn, hfc_hll
- :param HLH: the high-frequency componetn, hfc_hlh
- :param HHL: the high-frequency componetn, hfc_hhl
- :param HHH: the high-frequency componetn, hfc_hhh
- :return: the original 3D input data
- """
- assert len(LLL.size()) == len(LLH.size()) == len(
- LHL.size()) == len(LHH.size()) == 5
- assert len(HLL.size()) == len(HLH.size()) == len(
- HHL.size()) == len(HHH.size()) == 5
- self.input_depth = LLL.size()[-3] + HHH.size()[-3]
- self.input_height = LLL.size()[-2] + HHH.size()[-2]
- self.input_width = LLL.size()[-1] + HHH.size()[-1]
- self.get_matrix()
- return IDWTFunction_3D.apply(LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH,
- self.matrix_low_0, self.matrix_low_1, self.matrix_low_2,
- self.matrix_high_0, self.matrix_high_1, self.matrix_high_2)
-
-
-if __name__ == '__main__':
- dwt = DWT_2D("haar")
- iwt = IDWT_2D("haar")
- x = torch.randn(3, 3, 24, 24).cuda()
- xll = x
- wavelet_list = []
- for i in range(3):
- xll, xlh, xhl, xhh = dwt(xll)
- wavelet_list.append([xll, xlh, xhl, xhh])
-
- # xll = wavelet_list[-1] * torch.randn(xll.shape)
- for i in range(2)[::-1]:
- xll, xlh, xhl, xhh = wavelet_list[i]
- xll = iwt(xll, xlh, xhl, xhh)
- print(xll.shape)
-
- print(torch.sum(x - xll))
- print(torch.sum(x - iwt(*wavelet_list[0])))
diff --git a/wdm-3d-initial/DWT_IDWT/__init__.py b/wdm-3d-initial/DWT_IDWT/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/wdm-3d-initial/LICENSE b/wdm-3d-initial/LICENSE
deleted file mode 100644
index 902645e4281eaacb706b4ed7126e731e7d6e695c..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2024 Paul Friedrich
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/wdm-3d-initial/README.md b/wdm-3d-initial/README.md
deleted file mode 100644
index 1bc2580d736ec3951107f32d731b50fe2ea1a0a2..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/README.md
+++ /dev/null
@@ -1,145 +0,0 @@
-# WDM: 3D Wavelet Diffusion Models for High-Resolution Medical Image Synthesis
-[](https://opensource.org/licenses/MIT)
-[](https://pfriedri.github.io/wdm-3d-io/)
-[](https://arxiv.org/abs/2402.19043)
-
-This is the official PyTorch implementation of the paper **WDM: 3D Wavelet Diffusion Models for High-Resolution Medical Image Synthesis** by [Paul Friedrich](https://pfriedri.github.io/), [Julia Wolleb](https://dbe.unibas.ch/en/persons/julia-wolleb/), [Florentin Bieder](https://dbe.unibas.ch/en/persons/florentin-bieder/), [Alicia Durrer](https://dbe.unibas.ch/en/persons/alicia-durrer/) and [Philippe C. Cattin](https://dbe.unibas.ch/en/persons/philippe-claude-cattin/).
-
-
-If you find our work useful, please consider to :star: **star this repository** and :memo: **cite our paper**:
-```bibtex
-@inproceedings{friedrich2024wdm,
- title={Wdm: 3d wavelet diffusion models for high-resolution medical image synthesis},
- author={Friedrich, Paul and Wolleb, Julia and Bieder, Florentin and Durrer, Alicia and Cattin, Philippe C},
- booktitle={MICCAI Workshop on Deep Generative Models},
- pages={11--21},
- year={2024},
- organization={Springer}}
-```
-
-## Paper Abstract
-Due to the three-dimensional nature of CT- or MR-scans, generative modeling of medical images is a particularly challenging task. Existing approaches mostly apply patch-wise, slice-wise, or cascaded generation techniques to fit the high-dimensional data into the limited GPU memory. However, these approaches may introduce artifacts and potentially restrict the model's applicability for certain downstream tasks. This work presents WDM, a wavelet-based medical image synthesis framework that applies a diffusion model on wavelet decomposed images. The presented approach is a simple yet effective way of scaling diffusion models to high resolutions and can be trained on a single 40 GB GPU. Experimental results on BraTS and LIDC-IDRI unconditional image generation at a resolution of 128 x 128 x 128 show state-of-the-art image fidelity (FID) and sample diversity (MS-SSIM) scores compared to GANs, Diffusion Models, and Latent Diffusion Models. Our proposed method is the only one capable of generating high-quality images at a resolution of 256 x 256 x 256.
-
-
-
-
-
-
-## Dependencies
-We recommend using a [conda](https://github.com/conda-forge/miniforge#mambaforge) environment to install the required dependencies.
-You can create and activate such an environment called `wdm` by running the following commands:
-```sh
-mamba env create -f environment.yml
-mamba activate wdm
-```
-
-## Training & Sampling
-For training a new model or sampling from an already trained one, you can simply adapt and use the script `run.sh`. All relevant hyperparameters for reproducing our results are automatically set when using the correct `MODEL` in the general settings.
-For executing the script, simply use the following command:
-```sh
-bash run.sh
-```
-**Supported settings** (set in `run.sh` file):
-
-MODE: `'training'`, `'sampling'`
-
-MODEL: `'ours_unet_128'`, `'ours_unet_256'`, `'ours_wnet_128'`, `'ours_wnet_256'`
-
-DATASET: `'brats'`, `'lidc-idri'`
-
-## Conditional Image Synthesis / Image-to-Image Translation
-To use WDM for conditional image synthesis or paired image-to-image translation check out our repository [pfriedri/cwdm](https://github.com/pfriedri/cwdm) that implements our paper **cWDM: Conditional Wavelet Diffusion Models for Cross-Modality 3D Medical Image Synthesis**.
-
-## Pretrained Models
-We released pretrained models on [HuggingFace](https://huggingface.co/pfriedri/wdm-3d).
-
-Currently available models:
-- [BraTS 128](https://huggingface.co/pfriedri/wdm-3d/blob/main/brats_unet_128_1200k.pt): BraTS, 128 x 128 x 128, U-Net backbone, 1.2M Iterations
-- [LIDC-IDRI 128](https://huggingface.co/pfriedri/wdm-3d/blob/main/lidc-idri_unet_128_1200k.pt): LIDC-IDRI, 128 x 128 x 128, U-Net backbone, 1.2M Iterations
-
-## Data
-To ensure good reproducibility, we trained and evaluated our network on two publicly available datasets:
-* **BRATS 2023: Adult Glioma**, a dataset containing routine clinically-acquired, multi-site multiparametric magnetic resonance imaging (MRI) scans of brain tumor patients. We just used the T1-weighted images for training. The data is available [here](https://www.synapse.org/#!Synapse:syn51514105).
-
-* **LIDC-IDRI**, a dataset containing multi-site, thoracic computed tomography (CT) scans of lung cancer patients. The data is available [here](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=1966254).
-
-The provided code works for the following data structure (you might need to adapt the `DATA_DIR` variable in `run.sh`):
-```
-data
-└───BRATS
- └───BraTS-GLI-00000-000
- └───BraTS-GLI-00000-000-seg.nii.gz
- └───BraTS-GLI-00000-000-t1c.nii.gz
- └───BraTS-GLI-00000-000-t1n.nii.gz
- └───BraTS-GLI-00000-000-t2f.nii.gz
- └───BraTS-GLI-00000-000-t2w.nii.gz
- └───BraTS-GLI-00001-000
- └───BraTS-GLI-00002-000
- ...
-
-└───LIDC-IDRI
- └───LIDC-IDRI-0001
- └───preprocessed.nii.gz
- └───LIDC-IDRI-0002
- └───LIDC-IDRI-0003
- ...
-```
-We provide a script for preprocessing LIDC-IDRI. Simply run the following command with the correct path to the downloaded DICOM files `DICOM_PATH` and the directory you want to store the processed nifti files `NIFTI_PATH`:
-```sh
-python utils/preproc_lidc-idri.py --dicom_dir DICOM_PATH --nifti_dir NIFTI_PATH
-```
-
-## Evaluation
-As our code for evaluating the model performance has slightly different dependencies, we provide a second .yml file to set up the evaluation environment.
-Simply use the following command to create and activate the new environment:
-```sh
-mamba env create -f eval/eval_environment.yml
-mamba activate eval
-```
-### FID
-For computing the FID score, you need to specify the following variables and use them in the command below:
-* DATASET: `brats` or `lidc-idri`
-* IMG_SIZE: `128` or `256`
-* REAL_DATA_DIR: path to your real data
-* FAKE_DATA_DIR: path to your generated/ fake data
-* PATH_TO_FEATURE_EXTRACTOR: path to the feature extractor weights, e.g. `./eval/pretrained/resnet_50_23dataset.pt`
-* PATH_TO_ACTIVATIONS: path to the location where you want to save mus and sigmas (in case you want to reuse them), e.g. `./eval/activations/`
-* GPU_ID: gpu you want to use, e.g. `0`
-```sh
-python eval/fid.py --dataset DATASET --img_size IMG_SIZE --data_root_real REAL_DATA_DIR --data_root_fake FAKE_DATA_DIR --pretrain_path PATH_TO_FEATURE_EXTRACTOR --path_to_activations PATH_TO_ACTIVATIONS --gpu_id GPU_ID
-```
-### Mean MS-SSIM
-For computing the mean MS-SSIM, you need to specify the following variables and use them in the command below:
-* DATASET: `brats` or `lidc-idri`
-* IMG_SIZE: `128` or `256`
-* SAMPLE_DIR: path to the generated (or real) data
-
-```sh
-python eval/ms_ssim.py --dataset DATASET --img_size IMG_SIZE --sample_dir SAMPLE_DIR
-```
-## Implementation Details for Comparing Methods
-* **HA-GAN**: For implementing the paper [Hierarchical Amortized GAN for 3D High Resolution Medical Image Synthesis](https://ieeexplore.ieee.org/abstract/document/9770375), we use the publicly available [implementation](https://github.com/batmanlab/HA-GAN). We follow the implementation details presented in the original paper (Section E). The authors recommend cutting all zero slices from the volumes before training. To allow a fair comparison with other methods, we have omitted this step.
-* **3D-LDM**: For implementing the paper [Denoising Diffusion Probabilistic Models for 3D Medical Image Generation](https://www.nature.com/articles/s41598-023-34341-2), we use the publicly available [implementation](https://github.com/FirasGit/medicaldiffusion). We follow the implementation details presented in the Supplementary Material of the original paper (Supplementary Table 1).
-* **2.5D-LDM**: For implementing the paper [Make-A-Volume: Leveraging Latent Diffusion Models for Cross-Modality 3D Brain MRI Synthesis](https://link.springer.com/chapter/10.1007/978-3-031-43999-5_56), we adopted the method to work for image generation. We trained a VQ-VAE (downsampling factor 4, latent dimension 32) using an implementation from [MONAI Generative](https://github.com/Project-MONAI/GenerativeModels) and a diffusion model implementation from [OpenAI](https://github.com/openai/guided-diffusion). For implementing the pseudo 3D layers, we use a script provided by the authors. To allow for image generation, we sample all slices at once - meaning that the models batch size and the dimension of the 1D convolution is equal to the number of slices in the volume to be generated.
-* **3D DDPM**: For implementing a memory efficient baseline model, we use the 3D DDPM presented in the paper [Memory-Efficient 3D Denoising Diffusion Models for Medical Image Processing](https://openreview.net/forum?id=neXqIGpO-tn), and used the publicly available [implementation](https://github.com/FlorentinBieder/PatchDDM-3D). We use additive skip connections and train the model with the same hyperparameters as our models.
-
-All experiments were performed on a system with an AMD Epyc 7742 CPU and a NVIDIA A100 (40GB) GPU.
-
-## TODOs
-We plan to add further functionality to our framework:
-- [ ] Add compatibility for more datasets like MRNet, ADNI, or fastMRI
-- [x] Release pre-trained models
-- [ ] Extend the framework for 3D image inpainting
-- [x] Extend the framework for 3D image-to-image translation ([pfriedri/cwdm](https://github.com/pfriedri/cwdm))
-
-## Acknowledgements
-Our code is based on / inspired by the following repositories:
-* https://github.com/openai/guided-diffusion (published under [MIT License](https://github.com/openai/guided-diffusion/blob/main/LICENSE))
-* https://github.com/FlorentinBieder/PatchDDM-3D (published under [MIT License](https://github.com/FlorentinBieder/PatchDDM-3D/blob/master/LICENSE))
-* https://github.com/VinAIResearch/WaveDiff (published under [GNU General Public License v3.0](https://github.com/VinAIResearch/WaveDiff/blob/main/LICENSE))
-* https://github.com/LiQiufu/WaveCNet (published under [CC BY-NC-SA 4.0 License](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode))
-
-For computing FID scores we use a pretrained model (`resnet_50_23dataset.pth`) from:
-* https://github.com/Tencent/MedicalNet (published uner [MIT License](https://github.com/Tencent/MedicalNet/blob/master/LICENSE))
-
-Thanks for making these projects open-source.
diff --git a/wdm-3d-initial/assets/.ipynb_checkpoints/wdm-checkpoint.png b/wdm-3d-initial/assets/.ipynb_checkpoints/wdm-checkpoint.png
deleted file mode 100644
index ef455ac16f4cb488b80c6ea14f86d5fa0131f64d..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/assets/.ipynb_checkpoints/wdm-checkpoint.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ce1a9375ffae4a85f916ffb8341b85a37842bf5002b04b67e3d171e9490b8883
-size 6620547
diff --git a/wdm-3d-initial/assets/wdm.png b/wdm-3d-initial/assets/wdm.png
deleted file mode 100644
index ef455ac16f4cb488b80c6ea14f86d5fa0131f64d..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/assets/wdm.png
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ce1a9375ffae4a85f916ffb8341b85a37842bf5002b04b67e3d171e9490b8883
-size 6620547
diff --git a/wdm-3d-initial/environment.yml b/wdm-3d-initial/environment.yml
deleted file mode 100644
index 5279cd614c71af1c1447ee8db4b82a5ead37a0fa..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/environment.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: wdm
-channels:
- - pytorch
- - nvidia
- - conda-forge
-dependencies:
- - python=3.10.13
- - numpy=1.26.4
- - pytorch=2.2.0=py3.10_cuda11.8_cudnn8.7.0_0
- - pytorch-cuda=11.8
- - pywavelets=1.4.1
- - scipy=1.12.0
- - torchaudio=2.2.0=py310_cu118
- - torchvision=0.17.0=py310_cu118
- - pip
- - pip:
- - nibabel==5.2.0
- - blobfile==2.1.1
- - tensorboard==2.16.2
- - matplotlib==3.8.3
- - tqdm==4.66.2
- - dicom2nifti==2.4.10
diff --git a/wdm-3d-initial/eval/.ipynb_checkpoints/fid-checkpoint.py b/wdm-3d-initial/eval/.ipynb_checkpoints/fid-checkpoint.py
deleted file mode 100644
index dbad717f6d1ed1a1d57c11406f26f91e7306d0d8..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/.ipynb_checkpoints/fid-checkpoint.py
+++ /dev/null
@@ -1,214 +0,0 @@
-import numpy as np
-import torch
-from torch.utils.data import DataLoader
-import torch.nn.functional as F
-import torch.nn as nn
-import os
-import sys
-import argparse
-
-sys.path.append(".")
-sys.path.append("..")
-
-from scipy import linalg
-
-from guided_diffusion.bratsloader import BRATSVolumes
-from guided_diffusion.lidcloader import LIDCVolumes
-from model import generate_model
-
-
-def get_feature_extractor(sets):
- model, _ = generate_model(sets)
- checkpoint = torch.load(sets.pretrain_path)
- model.load_state_dict(checkpoint['state_dict'])
- model.eval()
- print("Done. Initialized feature extraction model and loaded pretrained weights.")
-
- return model
-
-
-def get_activations(model, data_loader, sets):
- pred_arr = np.empty((sets.num_samples, sets.dims))
-
- for i, batch in enumerate(data_loader):
- if isinstance(batch, list):
- batch = batch[0]
- batch = batch.cuda()
- if i % 10 == 0:
- print('\rPropagating batch %d' % i, end='', flush=True)
- with torch.no_grad():
- pred = model(batch)
-
- if i*sets.batch_size >= pred_arr.shape[0]:
- pred_arr[i*sets.batch_size:] = pred.cpu().numpy()
- break
- else:
- pred_arr[i*sets.batch_size:(i+1)*sets.batch_size] = pred.cpu().numpy()
-
- return pred_arr
-
-
-def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
- """
- Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians
- X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
-
- Params:
- -- mu1 : Numpy array containing the activations of a layer of the inception net (like returned by the function
- 'get_predictions') for generated samples.
- -- mu2 : The sample mean over activations, precalculated on a representative data set.
- -- sigma1: The covariance matrix over activations for generated samples.
- -- sigma2: The covariance matrix over activations, precalculated on a representative data set.
-
- Returns:
- -- : The Frechet Distance.
- """
-
- mu1 = np.atleast_1d(mu1)
- mu2 = np.atleast_1d(mu2)
-
- sigma1 = np.atleast_2d(sigma1)
- sigma2 = np.atleast_2d(sigma2)
-
- assert mu1.shape == mu2.shape, \
- 'Training and test mean vectors have different lengths'
- assert sigma1.shape == sigma2.shape, \
- 'Training and test covariances have different dimensions'
-
- diff = mu1 - mu2
-
- # Product might be almost singular
- covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
- if not np.isfinite(covmean).all():
- msg = ('fid calculation produces singular product; '
- 'adding %s to diagonal of cov estimates') % eps
- print(msg)
- offset = np.eye(sigma1.shape[0]) * eps
- covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
-
- # Numerical error might give slight imaginary component
- if np.iscomplexobj(covmean):
- if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
- m = np.max(np.abs(covmean.imag))
- raise ValueError('Imaginary component {}'.format(m))
- covmean = covmean.real
-
- tr_covmean = np.trace(covmean)
-
- return (diff.dot(diff) + np.trace(sigma1) +
- np.trace(sigma2) - 2 * tr_covmean)
-
-
-def process_feature_vecs(activations):
- mu = np.mean(activations, axis=0)
- sigma = np.cov(activations, rowvar=False)
-
- return mu, sigma
-
-
-def parse_opts():
- parser = argparse.ArgumentParser()
- parser.add_argument('--dataset', required=True, type=str, help='Dataset (brats | lidc-idri)')
- parser.add_argument('--img_size', required=True, type=int, help='Image size')
- parser.add_argument('--data_root_real', required=True, type=str, help='Path to real data')
- parser.add_argument('--data_root_fake', required=True, type=str, help='Path to fake data')
- parser.add_argument('--pretrain_path', required=True, type=str, help='Path to pretrained model')
- parser.add_argument('--path_to_activations', required=True, type=str, help='Path to activations')
- parser.add_argument('--n_seg_classes', default=2, type=int, help="Number of segmentation classes")
- parser.add_argument('--learning_rate', default=0.001, type=float,
- help='Initial learning rate (divided by 10 while training by lr scheduler)')
- parser.add_argument('--num_workers', default=4, type=int, help='Number of jobs')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch Size')
- parser.add_argument('--phase', default='test', type=str, help='Phase of train or test')
- parser.add_argument('--save_intervals', default=10, type=int, help='Interation for saving model')
- parser.add_argument('--n_epochs', default=200, type=int, help='Number of total epochs to run')
- parser.add_argument('--input_D', default=256, type=int, help='Input size of depth')
- parser.add_argument('--input_H', default=256, type=int, help='Input size of height')
- parser.add_argument('--input_W', default=256, type=int, help='Input size of width')
- parser.add_argument('--resume_path', default='', type=str, help='Path for resume model.')
-
- parser.add_argument('--new_layer_names', default=['conv_seg'], type=list, help='New layer except for backbone')
- parser.add_argument('--no_cuda', action='store_true', help='If true, cuda is not used.')
- parser.set_defaults(no_cuda=False)
- parser.add_argument('--gpu_id', default=0, type=int, help='Gpu id')
- parser.add_argument('--model', default='resnet', type=str,
- help='(resnet | preresnet | wideresnet | resnext | densenet | ')
- parser.add_argument('--model_depth', default=50, type=int, help='Depth of resnet (10 | 18 | 34 | 50 | 101)')
- parser.add_argument('--resnet_shortcut', default='B', type=str, help='Shortcut type of resnet (A | B)')
- parser.add_argument('--manual_seed', default=1, type=int, help='Manually set random seed')
- parser.add_argument('--ci_test', action='store_true', help='If true, ci testing is used.')
- args = parser.parse_args()
- args.save_folder = "./trails/models/{}_{}".format(args.model, args.model_depth)
-
- return args
-
-
-if __name__ == '__main__':
- # Model settings
- sets = parse_opts()
- sets.target_type = "normal"
- sets.phase = 'test'
- sets.batch_size = 1
- sets.dims = 2048
- sets.num_samples = 1000
-
- if not sets.no_cuda:
- dev_name = 'cuda:' + str(sets.gpu_id)
- device = torch.device(dev_name)
- else:
- device = torch.device('cpu')
-
- # getting model
- print("Load model ...")
- model = get_feature_extractor(sets)
- model = model.to(device)
-
- # Data loader
- print("Initialize dataloader ...")
- if sets.dataset == 'brats':
- real_data = BRATSVolumes(sets.data_root_real, normalize=None, mode='real', img_size=sets.img_size)
- fake_data = BRATSVolumes(sets.data_root_fake, normalize=None, mode='fake', img_size=sets.img_size)
-
- elif sets.dataset == 'lidc-idri':
- real_data = LIDCVolumes(sets.data_root_real, normalize=None, mode='real', img_size=sets.img_size)
- fake_data = LIDCVolumes(sets.data_root_fake, normalize=None, mode='fake', img_size=sets.img_size)
-
- else:
- print("Dataloader for this dataset is not implemented. Use 'brats' or 'lidc-idri'.")
-
- real_data_loader = DataLoader(real_data, batch_size=sets.batch_size, shuffle=False, num_workers=sets.batch_size,
- pin_memory=False)
- fake_data_loader = DataLoader(fake_data, batch_size=sets.batch_size, shuffle=False, num_workers=sets.batch_size,
- pin_memory=False)
-
-
- # Real data
- print("Get activations from real data ...")
- activations_real = get_activations(model, real_data_loader, sets)
- mu_real, sigma_real = process_feature_vecs(activations_real)
-
- path_to_mu_real = os.path.join(sets.path_to_activations, 'mu_real.npy')
- path_to_sigma_real = os.path.join(sets.path_to_activations, 'sigma_real.npy')
- np.save(path_to_mu_real, mu_real)
- print("")
- print("Saved mu_real to: " + path_to_mu_real)
- np.save(path_to_sigma_real, sigma_real)
- print("Saved sigma_real to: " + path_to_sigma_real)
-
-
- # Fake data
- print("Get activations from fake/generated data ...")
- activations_fake = get_activations(model, fake_data_loader, sets)
- mu_fake, sigma_fake = process_feature_vecs(activations_fake)
-
- path_to_mu_fake = os.path.join(sets.path_to_activations, 'mu_fake.npy')
- path_to_sigma_fake = os.path.join(sets.path_to_activations, 'sigma_fake.npy')
- np.save(path_to_mu_fake, mu_fake)
- print("")
- print("Saved mu_fake to: " + path_to_mu_fake)
- np.save(path_to_sigma_fake, sigma_fake)
- print("Saved sigma_fake to: " + path_to_sigma_fake)
-
- fid = calculate_frechet_distance(mu_real, sigma_real, mu_fake, sigma_fake)
- print("The FID score is: ")
- print(fid)
diff --git a/wdm-3d-initial/eval/.ipynb_checkpoints/ms_ssim-checkpoint.py b/wdm-3d-initial/eval/.ipynb_checkpoints/ms_ssim-checkpoint.py
deleted file mode 100644
index 02e98bea038e47fdd3d0fd19bc32761daa472259..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/.ipynb_checkpoints/ms_ssim-checkpoint.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import argparse
-import numpy as np
-import torch
-import sys
-
-sys.path.append(".")
-sys.path.append("..")
-
-from generative.metrics import MultiScaleSSIMMetric
-from monai import transforms
-from monai.config import print_config
-from monai.data import Dataset
-from monai.utils import set_determinism
-from torch.utils.data import DataLoader
-from tqdm import tqdm
-from guided_diffusion.bratsloader import BRATSVolumes
-from guided_diffusion.lidcloader import LIDCVolumes
-
-
-def parse_args():
- parser = argparse.ArgumentParser()
- parser.add_argument("--seed", type=int, default=42, help="Random seed to use.")
- parser.add_argument("--sample_dir", type=str, required=True, help="Location of the samples to evaluate.")
- parser.add_argument("--num_workers", type=int, default=8, help="Number of loader workers")
- parser.add_argument("--dataset", choices=['brats','lidc-idri'], required=True, help="Dataset (brats | lidc-idri)")
- parser.add_argument("--img_size", type=int, required=True)
-
- args = parser.parse_args()
- return args
-
-
-def main(args):
- set_determinism(seed=args.seed)
- #print_config()
-
- if args.dataset == 'brats':
- dataset_1 = BRATSVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
- dataset_2 = BRATSVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
-
- elif args.dataset == 'lidc-idri':
- dataset_1 = LIDCVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
- dataset_2 = LIDCVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
-
-
- dataloader_1 = DataLoader(dataset_1, batch_size=1, shuffle=False, num_workers=args.num_workers)
- dataloader_2 = DataLoader(dataset_2, batch_size=1, shuffle=False, num_workers=args.num_workers)
-
- device = torch.device("cuda")
- ms_ssim = MultiScaleSSIMMetric(spatial_dims=3, data_range=1.0, kernel_size=7)
-
- print("Computing MS-SSIM (this takes a while)...")
- ms_ssim_list = []
- pbar = tqdm(enumerate(dataloader_1), total=len(dataloader_1))
- for step, batch in pbar:
- img = batch[0]
- for batch2 in dataloader_2:
- img2 = batch2 [0]
- if batch[1] == batch2[1]:
- continue
- ms_ssim_list.append(ms_ssim(img.to(device), img2.to(device)).item())
- pbar.update()
-
- ms_ssim_list = np.array(ms_ssim_list)
- print("Calculated MS-SSIMs. Computing mean ...")
- print(f"Mean MS-SSIM: {ms_ssim_list.mean():.6f}")
-
-
-if __name__ == "__main__":
- args = parse_args()
- main(args)
diff --git a/wdm-3d-initial/eval/activations/activations.txt b/wdm-3d-initial/eval/activations/activations.txt
deleted file mode 100644
index 0340b471c9af52e70a69c2ab4932221eb0754e9d..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/activations/activations.txt
+++ /dev/null
@@ -1 +0,0 @@
-Path to store intermediate activations for computing FID scores.
diff --git a/wdm-3d-initial/eval/eval_environment.yml b/wdm-3d-initial/eval/eval_environment.yml
deleted file mode 100644
index a62439753a1c910d820046c16bb73402ac5acbeb..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/eval_environment.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: eval
-channels:
- - pytorch
- - nvidia
- - conda-forge
-dependencies:
- - numpy=1.24.4
- - pip=24.2
- - python=3.8.19
- - pytorch=2.4.0=py3.8_cuda11.8_cudnn9.1.0_0
- - pytorch-cuda=11.8
- - scipy=1.10.1
- - torchaudio=2.4.0=py38_cu118
- - torchvision=0.19.0=py38_cu118
- - pip:
- - monai==1.3.2
- - monai-generative==0.2.3
- - nibabel==5.2.1
- - tqdm==4.66.5
diff --git a/wdm-3d-initial/eval/fid.py b/wdm-3d-initial/eval/fid.py
deleted file mode 100644
index dbad717f6d1ed1a1d57c11406f26f91e7306d0d8..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/fid.py
+++ /dev/null
@@ -1,214 +0,0 @@
-import numpy as np
-import torch
-from torch.utils.data import DataLoader
-import torch.nn.functional as F
-import torch.nn as nn
-import os
-import sys
-import argparse
-
-sys.path.append(".")
-sys.path.append("..")
-
-from scipy import linalg
-
-from guided_diffusion.bratsloader import BRATSVolumes
-from guided_diffusion.lidcloader import LIDCVolumes
-from model import generate_model
-
-
-def get_feature_extractor(sets):
- model, _ = generate_model(sets)
- checkpoint = torch.load(sets.pretrain_path)
- model.load_state_dict(checkpoint['state_dict'])
- model.eval()
- print("Done. Initialized feature extraction model and loaded pretrained weights.")
-
- return model
-
-
-def get_activations(model, data_loader, sets):
- pred_arr = np.empty((sets.num_samples, sets.dims))
-
- for i, batch in enumerate(data_loader):
- if isinstance(batch, list):
- batch = batch[0]
- batch = batch.cuda()
- if i % 10 == 0:
- print('\rPropagating batch %d' % i, end='', flush=True)
- with torch.no_grad():
- pred = model(batch)
-
- if i*sets.batch_size >= pred_arr.shape[0]:
- pred_arr[i*sets.batch_size:] = pred.cpu().numpy()
- break
- else:
- pred_arr[i*sets.batch_size:(i+1)*sets.batch_size] = pred.cpu().numpy()
-
- return pred_arr
-
-
-def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
- """
- Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians
- X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
-
- Params:
- -- mu1 : Numpy array containing the activations of a layer of the inception net (like returned by the function
- 'get_predictions') for generated samples.
- -- mu2 : The sample mean over activations, precalculated on a representative data set.
- -- sigma1: The covariance matrix over activations for generated samples.
- -- sigma2: The covariance matrix over activations, precalculated on a representative data set.
-
- Returns:
- -- : The Frechet Distance.
- """
-
- mu1 = np.atleast_1d(mu1)
- mu2 = np.atleast_1d(mu2)
-
- sigma1 = np.atleast_2d(sigma1)
- sigma2 = np.atleast_2d(sigma2)
-
- assert mu1.shape == mu2.shape, \
- 'Training and test mean vectors have different lengths'
- assert sigma1.shape == sigma2.shape, \
- 'Training and test covariances have different dimensions'
-
- diff = mu1 - mu2
-
- # Product might be almost singular
- covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
- if not np.isfinite(covmean).all():
- msg = ('fid calculation produces singular product; '
- 'adding %s to diagonal of cov estimates') % eps
- print(msg)
- offset = np.eye(sigma1.shape[0]) * eps
- covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
-
- # Numerical error might give slight imaginary component
- if np.iscomplexobj(covmean):
- if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
- m = np.max(np.abs(covmean.imag))
- raise ValueError('Imaginary component {}'.format(m))
- covmean = covmean.real
-
- tr_covmean = np.trace(covmean)
-
- return (diff.dot(diff) + np.trace(sigma1) +
- np.trace(sigma2) - 2 * tr_covmean)
-
-
-def process_feature_vecs(activations):
- mu = np.mean(activations, axis=0)
- sigma = np.cov(activations, rowvar=False)
-
- return mu, sigma
-
-
-def parse_opts():
- parser = argparse.ArgumentParser()
- parser.add_argument('--dataset', required=True, type=str, help='Dataset (brats | lidc-idri)')
- parser.add_argument('--img_size', required=True, type=int, help='Image size')
- parser.add_argument('--data_root_real', required=True, type=str, help='Path to real data')
- parser.add_argument('--data_root_fake', required=True, type=str, help='Path to fake data')
- parser.add_argument('--pretrain_path', required=True, type=str, help='Path to pretrained model')
- parser.add_argument('--path_to_activations', required=True, type=str, help='Path to activations')
- parser.add_argument('--n_seg_classes', default=2, type=int, help="Number of segmentation classes")
- parser.add_argument('--learning_rate', default=0.001, type=float,
- help='Initial learning rate (divided by 10 while training by lr scheduler)')
- parser.add_argument('--num_workers', default=4, type=int, help='Number of jobs')
- parser.add_argument('--batch_size', default=1, type=int, help='Batch Size')
- parser.add_argument('--phase', default='test', type=str, help='Phase of train or test')
- parser.add_argument('--save_intervals', default=10, type=int, help='Interation for saving model')
- parser.add_argument('--n_epochs', default=200, type=int, help='Number of total epochs to run')
- parser.add_argument('--input_D', default=256, type=int, help='Input size of depth')
- parser.add_argument('--input_H', default=256, type=int, help='Input size of height')
- parser.add_argument('--input_W', default=256, type=int, help='Input size of width')
- parser.add_argument('--resume_path', default='', type=str, help='Path for resume model.')
-
- parser.add_argument('--new_layer_names', default=['conv_seg'], type=list, help='New layer except for backbone')
- parser.add_argument('--no_cuda', action='store_true', help='If true, cuda is not used.')
- parser.set_defaults(no_cuda=False)
- parser.add_argument('--gpu_id', default=0, type=int, help='Gpu id')
- parser.add_argument('--model', default='resnet', type=str,
- help='(resnet | preresnet | wideresnet | resnext | densenet | ')
- parser.add_argument('--model_depth', default=50, type=int, help='Depth of resnet (10 | 18 | 34 | 50 | 101)')
- parser.add_argument('--resnet_shortcut', default='B', type=str, help='Shortcut type of resnet (A | B)')
- parser.add_argument('--manual_seed', default=1, type=int, help='Manually set random seed')
- parser.add_argument('--ci_test', action='store_true', help='If true, ci testing is used.')
- args = parser.parse_args()
- args.save_folder = "./trails/models/{}_{}".format(args.model, args.model_depth)
-
- return args
-
-
-if __name__ == '__main__':
- # Model settings
- sets = parse_opts()
- sets.target_type = "normal"
- sets.phase = 'test'
- sets.batch_size = 1
- sets.dims = 2048
- sets.num_samples = 1000
-
- if not sets.no_cuda:
- dev_name = 'cuda:' + str(sets.gpu_id)
- device = torch.device(dev_name)
- else:
- device = torch.device('cpu')
-
- # getting model
- print("Load model ...")
- model = get_feature_extractor(sets)
- model = model.to(device)
-
- # Data loader
- print("Initialize dataloader ...")
- if sets.dataset == 'brats':
- real_data = BRATSVolumes(sets.data_root_real, normalize=None, mode='real', img_size=sets.img_size)
- fake_data = BRATSVolumes(sets.data_root_fake, normalize=None, mode='fake', img_size=sets.img_size)
-
- elif sets.dataset == 'lidc-idri':
- real_data = LIDCVolumes(sets.data_root_real, normalize=None, mode='real', img_size=sets.img_size)
- fake_data = LIDCVolumes(sets.data_root_fake, normalize=None, mode='fake', img_size=sets.img_size)
-
- else:
- print("Dataloader for this dataset is not implemented. Use 'brats' or 'lidc-idri'.")
-
- real_data_loader = DataLoader(real_data, batch_size=sets.batch_size, shuffle=False, num_workers=sets.batch_size,
- pin_memory=False)
- fake_data_loader = DataLoader(fake_data, batch_size=sets.batch_size, shuffle=False, num_workers=sets.batch_size,
- pin_memory=False)
-
-
- # Real data
- print("Get activations from real data ...")
- activations_real = get_activations(model, real_data_loader, sets)
- mu_real, sigma_real = process_feature_vecs(activations_real)
-
- path_to_mu_real = os.path.join(sets.path_to_activations, 'mu_real.npy')
- path_to_sigma_real = os.path.join(sets.path_to_activations, 'sigma_real.npy')
- np.save(path_to_mu_real, mu_real)
- print("")
- print("Saved mu_real to: " + path_to_mu_real)
- np.save(path_to_sigma_real, sigma_real)
- print("Saved sigma_real to: " + path_to_sigma_real)
-
-
- # Fake data
- print("Get activations from fake/generated data ...")
- activations_fake = get_activations(model, fake_data_loader, sets)
- mu_fake, sigma_fake = process_feature_vecs(activations_fake)
-
- path_to_mu_fake = os.path.join(sets.path_to_activations, 'mu_fake.npy')
- path_to_sigma_fake = os.path.join(sets.path_to_activations, 'sigma_fake.npy')
- np.save(path_to_mu_fake, mu_fake)
- print("")
- print("Saved mu_fake to: " + path_to_mu_fake)
- np.save(path_to_sigma_fake, sigma_fake)
- print("Saved sigma_fake to: " + path_to_sigma_fake)
-
- fid = calculate_frechet_distance(mu_real, sigma_real, mu_fake, sigma_fake)
- print("The FID score is: ")
- print(fid)
diff --git a/wdm-3d-initial/eval/model.py b/wdm-3d-initial/eval/model.py
deleted file mode 100644
index 1209f249aa9b77dfeb22ad4f060402e28b6e9435..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/model.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-from torch import nn
-from models import resnet
-
-
-def generate_model(opt):
- assert opt.model in ['resnet']
-
- if opt.model == 'resnet':
- assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
-
- if opt.model_depth == 10:
- model = resnet.resnet10(
- sample_input_W=opt.input_W,
- sample_input_H=opt.input_H,
- sample_input_D=opt.input_D,
- shortcut_type=opt.resnet_shortcut,
- no_cuda=opt.no_cuda,
- num_seg_classes=opt.n_seg_classes)
- elif opt.model_depth == 18:
- model = resnet.resnet18(
- sample_input_W=opt.input_W,
- sample_input_H=opt.input_H,
- sample_input_D=opt.input_D,
- shortcut_type=opt.resnet_shortcut,
- no_cuda=opt.no_cuda,
- num_seg_classes=opt.n_seg_classes)
- elif opt.model_depth == 34:
- model = resnet.resnet34(
- sample_input_W=opt.input_W,
- sample_input_H=opt.input_H,
- sample_input_D=opt.input_D,
- shortcut_type=opt.resnet_shortcut,
- no_cuda=opt.no_cuda,
- num_seg_classes=opt.n_seg_classes)
- elif opt.model_depth == 50:
- model = resnet.resnet50(
- sample_input_W=opt.input_W,
- sample_input_H=opt.input_H,
- sample_input_D=opt.input_D,
- shortcut_type=opt.resnet_shortcut,
- no_cuda=opt.no_cuda,
- num_seg_classes=opt.n_seg_classes)
- elif opt.model_depth == 101:
- model = resnet.resnet101(
- sample_input_W=opt.input_W,
- sample_input_H=opt.input_H,
- sample_input_D=opt.input_D,
- shortcut_type=opt.resnet_shortcut,
- no_cuda=opt.no_cuda,
- num_seg_classes=opt.n_seg_classes)
- elif opt.model_depth == 152:
- model = resnet.resnet152(
- sample_input_W=opt.input_W,
- sample_input_H=opt.input_H,
- sample_input_D=opt.input_D,
- shortcut_type=opt.resnet_shortcut,
- no_cuda=opt.no_cuda,
- num_seg_classes=opt.n_seg_classes)
- elif opt.model_depth == 200:
- model = resnet.resnet200(
- sample_input_W=opt.input_W,
- sample_input_H=opt.input_H,
- sample_input_D=opt.input_D,
- shortcut_type=opt.resnet_shortcut,
- no_cuda=opt.no_cuda,
- num_seg_classes=opt.n_seg_classes)
-
- if not opt.no_cuda:
- import os
- os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_id)
- model = model.cuda()
- model = nn.DataParallel(model)
- net_dict = model.state_dict()
- else:
- net_dict = model.state_dict()
-
- # load pretrain
- if opt.phase != 'test' and opt.pretrain_path:
- print('loading pretrained model {}'.format(opt.pretrain_path))
- pretrain = torch.load(opt.pretrain_path)
- pretrain_dict = {k: v for k, v in pretrain['state_dict'].items() if k in net_dict.keys()}
-
- net_dict.update(pretrain_dict)
- model.load_state_dict(net_dict)
-
- new_parameters = []
- for pname, p in model.named_parameters():
- for layer_name in opt.new_layer_names:
- if pname.find(layer_name) >= 0:
- new_parameters.append(p)
- break
-
- new_parameters_id = list(map(id, new_parameters))
- base_parameters = list(filter(lambda p: id(p) not in new_parameters_id, model.parameters()))
- parameters = {'base_parameters': base_parameters,
- 'new_parameters': new_parameters}
-
- return model, parameters
-
- return model, model.parameters()
diff --git a/wdm-3d-initial/eval/models/resnet.py b/wdm-3d-initial/eval/models/resnet.py
deleted file mode 100644
index f7f476e2e090eecea03b91ec97ce88e7d0bbdcc2..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/models/resnet.py
+++ /dev/null
@@ -1,245 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.autograd import Variable
-import math
-from functools import partial
-
-__all__ = [
- 'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
- 'resnet152', 'resnet200'
-]
-
-
-def conv3x3x3(in_planes, out_planes, stride=1, dilation=1):
- # 3x3x3 convolution with padding
- return nn.Conv3d(
- in_planes,
- out_planes,
- kernel_size=3,
- dilation=dilation,
- stride=stride,
- padding=dilation,
- bias=False)
-
-
-def downsample_basic_block(x, planes, stride, no_cuda=False):
- out = F.avg_pool3d(x, kernel_size=1, stride=stride)
- zero_pads = torch.Tensor(
- out.size(0), planes - out.size(1), out.size(2), out.size(3),
- out.size(4)).zero_()
- if not no_cuda:
- if isinstance(out.data, torch.cuda.FloatTensor):
- zero_pads = zero_pads.cuda()
-
- out = Variable(torch.cat([out.data, zero_pads], dim=1))
-
- return out
-
-
-class BasicBlock(nn.Module):
- expansion = 1
-
- def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
- super(BasicBlock, self).__init__()
- self.conv1 = conv3x3x3(inplanes, planes, stride=stride, dilation=dilation)
- self.bn1 = nn.BatchNorm3d(planes)
- self.relu = nn.ReLU(inplace=True)
- self.conv2 = conv3x3x3(planes, planes, dilation=dilation)
- self.bn2 = nn.BatchNorm3d(planes)
- self.downsample = downsample
- self.stride = stride
- self.dilation = dilation
-
- def forward(self, x):
- residual = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
- out = self.conv2(out)
- out = self.bn2(out)
-
- if self.downsample is not None:
- residual = self.downsample(x)
-
- out += residual
- out = self.relu(out)
-
- return out
-
-
-class Bottleneck(nn.Module):
- expansion = 4
-
- def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
- super(Bottleneck, self).__init__()
- self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
- self.bn1 = nn.BatchNorm3d(planes)
- self.conv2 = nn.Conv3d(
- planes, planes, kernel_size=3, stride=stride, dilation=dilation, padding=dilation, bias=False)
- self.bn2 = nn.BatchNorm3d(planes)
- self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
- self.bn3 = nn.BatchNorm3d(planes * 4)
- self.relu = nn.ReLU(inplace=True)
- self.downsample = downsample
- self.stride = stride
- self.dilation = dilation
-
- def forward(self, x):
- residual = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
-
- out = self.conv2(out)
- out = self.bn2(out)
- out = self.relu(out)
-
- out = self.conv3(out)
- out = self.bn3(out)
-
- if self.downsample is not None:
- residual = self.downsample(x)
-
- out += residual
- out = self.relu(out)
-
- return out
-
-
-class Flatten(torch.nn.Module):
- def forward(self, inp):
- return inp.view(inp.size(0), -1)
-
-
-class ResNet(nn.Module):
-
- def __init__(self,
- block,
- layers,
- sample_input_D,
- sample_input_H,
- sample_input_W,
- num_seg_classes,
- shortcut_type='B',
- no_cuda=False):
- self.inplanes = 64
- self.no_cuda = no_cuda
- super(ResNet, self).__init__()
- self.conv1 = nn.Conv3d(
- 1,
- 64,
- kernel_size=7,
- stride=(2, 2, 2),
- padding=(3, 3, 3),
- bias=False)
-
- self.bn1 = nn.BatchNorm3d(64)
- self.relu = nn.ReLU(inplace=True)
- self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
- self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
- self.layer2 = self._make_layer(
- block, 128, layers[1], shortcut_type, stride=2)
- self.layer3 = self._make_layer(
- block, 256, layers[2], shortcut_type, stride=1, dilation=2)
- self.layer4 = self._make_layer(
- block, 512, layers[3], shortcut_type, stride=1, dilation=4)
-
- self.conv_seg = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)), Flatten())
-
- for m in self.modules():
- if isinstance(m, nn.Conv3d):
- m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
- elif isinstance(m, nn.BatchNorm3d):
- m.weight.data.fill_(1)
- m.bias.data.zero_()
-
- def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, dilation=1):
- downsample = None
- if stride != 1 or self.inplanes != planes * block.expansion:
- if shortcut_type == 'A':
- downsample = partial(
- downsample_basic_block,
- planes=planes * block.expansion,
- stride=stride,
- no_cuda=self.no_cuda)
- else:
- downsample = nn.Sequential(
- nn.Conv3d(
- self.inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=stride,
- bias=False), nn.BatchNorm3d(planes * block.expansion))
-
- layers = []
- layers.append(block(self.inplanes, planes, stride=stride, dilation=dilation, downsample=downsample))
- self.inplanes = planes * block.expansion
- for i in range(1, blocks):
- layers.append(block(self.inplanes, planes, dilation=dilation))
-
- return nn.Sequential(*layers)
-
- def forward(self, x):
- x = self.conv1(x)
- x = self.bn1(x)
- x = self.relu(x)
- x = self.maxpool(x)
- x = self.layer1(x)
- x = self.layer2(x)
- x = self.layer3(x)
- x = self.layer4(x)
- x = self.conv_seg(x)
-
- return x
-
-
-def resnet10(**kwargs):
- """Constructs a ResNet-18 model.
- """
- model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
- return model
-
-
-def resnet18(**kwargs):
- """Constructs a ResNet-18 model.
- """
- model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
- return model
-
-
-def resnet34(**kwargs):
- """Constructs a ResNet-34 model.
- """
- model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
- return model
-
-
-def resnet50(**kwargs):
- """Constructs a ResNet-50 model.
- """
- model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
- return model
-
-
-def resnet101(**kwargs):
- """Constructs a ResNet-101 model.
- """
- model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
- return model
-
-
-def resnet152(**kwargs):
- """Constructs a ResNet-101 model.
- """
- model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
- return model
-
-
-def resnet200(**kwargs):
- """Constructs a ResNet-101 model.
- """
- model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
- return model
diff --git a/wdm-3d-initial/eval/ms_ssim.py b/wdm-3d-initial/eval/ms_ssim.py
deleted file mode 100644
index 02e98bea038e47fdd3d0fd19bc32761daa472259..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/ms_ssim.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import argparse
-import numpy as np
-import torch
-import sys
-
-sys.path.append(".")
-sys.path.append("..")
-
-from generative.metrics import MultiScaleSSIMMetric
-from monai import transforms
-from monai.config import print_config
-from monai.data import Dataset
-from monai.utils import set_determinism
-from torch.utils.data import DataLoader
-from tqdm import tqdm
-from guided_diffusion.bratsloader import BRATSVolumes
-from guided_diffusion.lidcloader import LIDCVolumes
-
-
-def parse_args():
- parser = argparse.ArgumentParser()
- parser.add_argument("--seed", type=int, default=42, help="Random seed to use.")
- parser.add_argument("--sample_dir", type=str, required=True, help="Location of the samples to evaluate.")
- parser.add_argument("--num_workers", type=int, default=8, help="Number of loader workers")
- parser.add_argument("--dataset", choices=['brats','lidc-idri'], required=True, help="Dataset (brats | lidc-idri)")
- parser.add_argument("--img_size", type=int, required=True)
-
- args = parser.parse_args()
- return args
-
-
-def main(args):
- set_determinism(seed=args.seed)
- #print_config()
-
- if args.dataset == 'brats':
- dataset_1 = BRATSVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
- dataset_2 = BRATSVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
-
- elif args.dataset == 'lidc-idri':
- dataset_1 = LIDCVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
- dataset_2 = LIDCVolumes(directory=args.sample_dir, mode='fake', img_size=args.img_size)
-
-
- dataloader_1 = DataLoader(dataset_1, batch_size=1, shuffle=False, num_workers=args.num_workers)
- dataloader_2 = DataLoader(dataset_2, batch_size=1, shuffle=False, num_workers=args.num_workers)
-
- device = torch.device("cuda")
- ms_ssim = MultiScaleSSIMMetric(spatial_dims=3, data_range=1.0, kernel_size=7)
-
- print("Computing MS-SSIM (this takes a while)...")
- ms_ssim_list = []
- pbar = tqdm(enumerate(dataloader_1), total=len(dataloader_1))
- for step, batch in pbar:
- img = batch[0]
- for batch2 in dataloader_2:
- img2 = batch2 [0]
- if batch[1] == batch2[1]:
- continue
- ms_ssim_list.append(ms_ssim(img.to(device), img2.to(device)).item())
- pbar.update()
-
- ms_ssim_list = np.array(ms_ssim_list)
- print("Calculated MS-SSIMs. Computing mean ...")
- print(f"Mean MS-SSIM: {ms_ssim_list.mean():.6f}")
-
-
-if __name__ == "__main__":
- args = parse_args()
- main(args)
diff --git a/wdm-3d-initial/eval/pretrained/pretrained.txt b/wdm-3d-initial/eval/pretrained/pretrained.txt
deleted file mode 100644
index c1a29131549b59526eaafa1deca429edf9e54364..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/eval/pretrained/pretrained.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Path to store pretrained models.
-We used a pretrained 3D ResNet from: https://github.com/Tencent/MedicalNet
-Pretrained model weights for the model 'resnet_50_23dataset.pth' are available at: https://drive.google.com/file/d/13tnSvXY7oDIEloNFiGTsjUIYfS3g3BfG/view?usp=sharing
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/__init__-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/__init__-checkpoint.py
deleted file mode 100644
index fd11937a64d66c81dffd618117efc3a9ddc5fd9c..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/__init__-checkpoint.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-Codebase for "Diffusion Models for Medial Anomaly Detection".
-"""
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/bratsloader-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/bratsloader-checkpoint.py
deleted file mode 100644
index 951e0dda170f9f1bf2370c8bab49596724f13e9d..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/bratsloader-checkpoint.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.utils.data
-import numpy as np
-import os
-import os.path
-import nibabel
-
-
-class BRATSVolumes(torch.utils.data.Dataset):
- def __init__(self, directory, test_flag=False, normalize=None, mode='train', img_size=256):
- '''
- directory is expected to contain some folder structure:
- if some subfolder contains only files, all of these
- files are assumed to have a name like
- brats_train_NNN_XXX_123_w.nii.gz
- where XXX is one of t1n, t1c, t2w, t2f, seg
- we assume these five files belong to the same image
- seg is supposed to contain the segmentation
- '''
- super().__init__()
- self.mode = mode
- self.directory = os.path.expanduser(directory)
- self.normalize = normalize or (lambda x: x)
- self.test_flag = test_flag
- self.img_size = img_size
- if test_flag:
- self.seqtypes = ['t1n', 't1c', 't2w', 't2f']
- else:
- self.seqtypes = ['t1n', 't1c', 't2w', 't2f', 'seg']
- self.seqtypes_set = set(self.seqtypes)
- self.database = []
-
- if not self.mode == 'fake': # Used during training and for evaluating real data
- for root, dirs, files in os.walk(self.directory):
- # if there are no subdirs, we have a datadir
- if not dirs:
- files.sort()
- datapoint = dict()
- # extract all files as channels
- for f in files:
- seqtype = f.split('-')[4].split('.')[0]
- datapoint[seqtype] = os.path.join(root, f)
- self.database.append(datapoint)
- else: # Used for evaluating fake data
- for root, dirs, files in os.walk(self.directory):
- for f in files:
- datapoint = dict()
- datapoint['t1n'] = os.path.join(root, f)
- self.database.append(datapoint)
-
- def __getitem__(self, x):
- filedict = self.database[x]
- name = filedict['t1n']
- nib_img = nibabel.load(name) # We only use t1 weighted images
- out = nib_img.get_fdata()
-
- if not self.mode == 'fake':
- # CLip and normalize the images
- out_clipped = np.clip(out, np.quantile(out, 0.001), np.quantile(out, 0.999))
- out_normalized = (out_clipped - np.min(out_clipped)) / (np.max(out_clipped) - np.min(out_clipped))
- out = torch.tensor(out_normalized)
-
- # Zero pad images
- image = torch.zeros(1, 256, 256, 256)
- image[:, 8:-8, 8:-8, 50:-51] = out
-
- # Downsampling
- if self.img_size == 128:
- downsample = nn.AvgPool3d(kernel_size=2, stride=2)
- image = downsample(image)
- else:
- image = torch.tensor(out, dtype=torch.float32)
- image = image.unsqueeze(dim=0)
-
- # Normalization
- image = self.normalize(image)
-
- if self.mode == 'fake':
- return image, name
- else:
- return image
-
- def __len__(self):
- return len(self.database)
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/dist_util-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/dist_util-checkpoint.py
deleted file mode 100644
index c2385710ce2a404050021411fd6418cdf9548b02..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/dist_util-checkpoint.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Helpers for distributed training.
-"""
-
-import io
-import os
-import socket
-
-import blobfile as bf
-import torch as th
-import torch.distributed as dist
-
-# Change this to reflect your cluster layout.
-# The GPU for a given rank is (rank % GPUS_PER_NODE).
-GPUS_PER_NODE = 8
-
-SETUP_RETRY_COUNT = 3
-
-
-def setup_dist(devices=(0,)):
- """
- Setup a distributed process group.
- """
- if dist.is_initialized():
- return
- try:
- device_string = ','.join(map(str, devices))
- except TypeError:
- device_string = str(devices)
- os.environ["CUDA_VISIBLE_DEVICES"] = device_string #f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
-
- #comm = MPI.COMM_WORLD
- # print('commworld, 'f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}", comm)
- backend = "gloo" if not th.cuda.is_available() else "nccl"
- # print('commrank', comm.rank)
- # print('commsize', comm.size)
-
- if backend == "gloo":
- hostname = "localhost"
- else:
- hostname = socket.gethostbyname(socket.getfqdn())
- os.environ["MASTER_ADDR"] = '127.0.1.1'#comm.bcast(hostname, root=0)
- os.environ["RANK"] = '0'#str(comm.rank)
- os.environ["WORLD_SIZE"] = '1'#str(comm.size)
-
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind(("", 0))
- s.listen(1)
- port = s.getsockname()[1]
- s.close()
- # print('port2', port)
- os.environ["MASTER_PORT"] = str(port)
- dist.init_process_group(backend=backend, init_method="env://")
-
-
-def dev(device_number=0):
- """
- Get the device to use for torch.distributed.
- """
- if isinstance(device_number, (list, tuple)): # multiple devices specified
- return [dev(k) for k in device_number] # recursive call
- if th.cuda.is_available():
- device_count = th.cuda.device_count()
- if device_count == 1:
- return th.device(f"cuda")
- else:
- if device_number < device_count: # if we specify multiple devices, we have to be specific
- return th.device(f'cuda:{device_number}')
- else:
- raise ValueError(f'requested device number {device_number} (0-indexed) but only {device_count} devices available')
- return th.device("cpu")
-
-
-def load_state_dict(path, **kwargs):
- """
- Load a PyTorch file without redundant fetches across MPI ranks.
- """
- #print('mpicommworldgetrank', MPI.COMM_WORLD.Get_rank())
- mpigetrank=0
- # if MPI.COMM_WORLD.Get_rank() == 0:
- if mpigetrank==0:
- with bf.BlobFile(path, "rb") as f:
- data = f.read()
- else:
- data = None
- # data = MPI.COMM_WORLD.bcast(data)
- # print('mpibacst', MPI.COMM_WORLD.bcast(data))
- return th.load(io.BytesIO(data), **kwargs)
-
-
-def sync_params(params):
- """
- Synchronize a sequence of Tensors across ranks from rank 0.
- """
- #for p in params:
- # with th.no_grad():
- # dist.broadcast(p, 0)
-
-
-def _find_free_port():
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind(("", 0))
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- return s.getsockname()[1]
- finally:
- s.close()
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/gaussian_diffusion-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/gaussian_diffusion-checkpoint.py
deleted file mode 100644
index 73ade109a16d3eb91588b5f53f7e05c82407aeed..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/gaussian_diffusion-checkpoint.py
+++ /dev/null
@@ -1,1185 +0,0 @@
-"""
-This code started out as a PyTorch port of Ho et al's diffusion models:
-https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
-
-Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
-"""
-from PIL import Image
-from torch.autograd import Variable
-import enum
-import torch.nn.functional as F
-from torchvision.utils import save_image
-import torch
-import math
-import numpy as np
-import torch as th
-from .train_util import visualize
-from .nn import mean_flat
-from .losses import normal_kl, discretized_gaussian_log_likelihood
-from scipy import ndimage
-from torchvision import transforms
-import matplotlib.pyplot as plt
-from scipy.interpolate import interp1d
-
-from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
-
-dwt = DWT_3D('haar')
-idwt = IDWT_3D('haar')
-
-
-def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
- """
- Get a pre-defined beta schedule for the given name.
-
- The beta schedule library consists of beta schedules which remain similar
- in the limit of num_diffusion_timesteps.
- Beta schedules may be added, but should not be removed or changed once
- they are committed to maintain backwards compatibility.
- """
- if schedule_name == "linear":
- # Linear schedule from Ho et al, extended to work for any number of
- # diffusion steps.
- scale = 1000 / num_diffusion_timesteps
- beta_start = scale * 0.0001
- beta_end = scale * 0.02
- return np.linspace(
- beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
- )
- elif schedule_name == "cosine":
- return betas_for_alpha_bar(
- num_diffusion_timesteps,
- lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
- )
- else:
- raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function,
- which defines the cumulative product of (1-beta) over time from t = [0,1].
-
- :param num_diffusion_timesteps: the number of betas to produce.
- :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
- produces the cumulative product of (1-beta) up to that
- part of the diffusion process.
- :param max_beta: the maximum beta to use; use values lower than 1 to
- prevent singularities.
- """
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return np.array(betas)
-
-
-class ModelMeanType(enum.Enum):
- """
- Which type of output the model predicts.
- """
-
- PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
- START_X = enum.auto() # the model predicts x_0
- EPSILON = enum.auto() # the model predicts epsilon
-
-
-class ModelVarType(enum.Enum):
- """
- What is used as the model's output variance.
-
- The LEARNED_RANGE option has been added to allow the model to predict
- values between FIXED_SMALL and FIXED_LARGE, making its job easier.
- """
-
- LEARNED = enum.auto()
- FIXED_SMALL = enum.auto()
- FIXED_LARGE = enum.auto()
- LEARNED_RANGE = enum.auto()
-
-
-class LossType(enum.Enum):
- MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
- RESCALED_MSE = (
- enum.auto()
- ) # use raw MSE loss (with RESCALED_KL when learning variances)
- KL = enum.auto() # use the variational lower-bound
- RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
-
- def is_vb(self):
- return self == LossType.KL or self == LossType.RESCALED_KL
-
-
-class GaussianDiffusion:
- """
- Utilities for training and sampling diffusion models.
-
- Ported directly from here, and then adapted over time to further experimentation.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
-
- :param betas: a 1-D numpy array of betas for each diffusion timestep,
- starting at T and going to 1.
- :param model_mean_type: a ModelMeanType determining what the model outputs.
- :param model_var_type: a ModelVarType determining how variance is output.
- :param loss_type: a LossType determining the loss function to use.
- :param rescale_timesteps: if True, pass floating point timesteps into the
- model so that they are always scaled like in the
- original paper (0 to 1000).
- """
-
- def __init__(
- self,
- *,
- betas,
- model_mean_type,
- model_var_type,
- loss_type,
- rescale_timesteps=False,
- mode='default',
- loss_level='image'
- ):
- self.model_mean_type = model_mean_type
- self.model_var_type = model_var_type
- self.loss_type = loss_type
- self.rescale_timesteps = rescale_timesteps
- self.mode = mode
- self.loss_level=loss_level
-
- # Use float64 for accuracy.
- betas = np.array(betas, dtype=np.float64)
- self.betas = betas
- assert len(betas.shape) == 1, "betas must be 1-D"
- assert (betas > 0).all() and (betas <= 1).all()
-
- self.num_timesteps = int(betas.shape[0])
-
- alphas = 1.0 - betas
- self.alphas_cumprod = np.cumprod(alphas, axis=0) # t
- self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1]) # t-1
- self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0) # t+1
- assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
- self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
- self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
- self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
- self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
-
- # calculations for posterior q(x_{t-1} | x_t, x_0)
- self.posterior_variance = (
- betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
- )
- # log calculation clipped because the posterior variance is 0 at the
- # beginning of the diffusion chain.
- self.posterior_log_variance_clipped = np.log(
- np.append(self.posterior_variance[1], self.posterior_variance[1:])
- )
- self.posterior_mean_coef1 = (
- betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
- )
- self.posterior_mean_coef2 = (
- (1.0 - self.alphas_cumprod_prev)
- * np.sqrt(alphas)
- / (1.0 - self.alphas_cumprod)
- )
-
- def q_mean_variance(self, x_start, t):
- """
- Get the distribution q(x_t | x_0).
-
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
- """
- mean = (
- _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
- )
- variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
- log_variance = _extract_into_tensor(
- self.log_one_minus_alphas_cumprod, t, x_start.shape
- )
- return mean, variance, log_variance
-
- def q_sample(self, x_start, t, noise=None):
- """
- Diffuse the data for a given number of diffusion steps.
-
- In other words, sample from q(x_t | x_0).
-
- :param x_start: the initial data batch.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :param noise: if specified, the split-out normal noise.
- :return: A noisy version of x_start.
- """
- if noise is None:
- noise = th.randn_like(x_start)
- assert noise.shape == x_start.shape
- return (
- _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
- + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
- * noise
- )
-
- def q_posterior_mean_variance(self, x_start, x_t, t):
- """
- Compute the mean and variance of the diffusion posterior:
-
- q(x_{t-1} | x_t, x_0)
-
- """
-
- assert x_start.shape == x_t.shape
- posterior_mean = (
- _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
- + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
- )
- posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
- posterior_log_variance_clipped = _extract_into_tensor(
- self.posterior_log_variance_clipped, t, x_t.shape
- )
- assert (
- posterior_mean.shape[0]
- == posterior_variance.shape[0]
- == posterior_log_variance_clipped.shape[0]
- == x_start.shape[0]
- )
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
- def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
- """
- Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
- the initial x, x_0.
- :param model: the model, which takes a signal and a batch of timesteps
- as input.
- :param x: the [N x C x ...] tensor at time t.
- :param t: a 1-D Tensor of timesteps.
- :param clip_denoised: if True, clip the denoised signal into [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample. Applies before
- clip_denoised.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :return: a dict with the following keys:
- - 'mean': the model mean output.
- - 'variance': the model variance output.
- - 'log_variance': the log of 'variance'.
- - 'pred_xstart': the prediction for x_0.
- """
- if model_kwargs is None:
- model_kwargs = {}
-
- B, C = x.shape[:2]
-
- assert t.shape == (B,)
- model_output = model(x, self._scale_timesteps(t), **model_kwargs)
-
- if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
- assert model_output.shape == (B, C * 2, *x.shape[2:])
- model_output, model_var_values = th.split(model_output, C, dim=1)
- if self.model_var_type == ModelVarType.LEARNED:
- model_log_variance = model_var_values
- model_variance = th.exp(model_log_variance)
- else:
- min_log = _extract_into_tensor(
- self.posterior_log_variance_clipped, t, x.shape
- )
- max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
- # The model_var_values is [-1, 1] for [min_var, max_var].
- frac = (model_var_values + 1) / 2
- model_log_variance = frac * max_log + (1 - frac) * min_log
- model_variance = th.exp(model_log_variance)
- else:
- model_variance, model_log_variance = {
- # for fixedlarge, we set the initial (log-)variance like so
- # to get a better decoder log likelihood.
- ModelVarType.FIXED_LARGE: (
- np.append(self.posterior_variance[1], self.betas[1:]),
- np.log(np.append(self.posterior_variance[1], self.betas[1:])),
- ),
- ModelVarType.FIXED_SMALL: (
- self.posterior_variance,
- self.posterior_log_variance_clipped,
- ),
- }[self.model_var_type]
- model_variance = _extract_into_tensor(model_variance, t, x.shape)
- model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
-
- def process_xstart(x):
- if denoised_fn is not None:
- x = denoised_fn(x)
- if clip_denoised:
- B, _, H, W, D = x.size()
- x_idwt = idwt(x[:, 0, :, :, :].view(B, 1, H, W, D) * 3.,
- x[:, 1, :, :, :].view(B, 1, H, W, D),
- x[:, 2, :, :, :].view(B, 1, H, W, D),
- x[:, 3, :, :, :].view(B, 1, H, W, D),
- x[:, 4, :, :, :].view(B, 1, H, W, D),
- x[:, 5, :, :, :].view(B, 1, H, W, D),
- x[:, 6, :, :, :].view(B, 1, H, W, D),
- x[:, 7, :, :, :].view(B, 1, H, W, D))
-
- x_idwt_clamp = x_idwt.clamp(-1, 1)
-
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(x_idwt_clamp)
- x = th.cat([LLL / 3., LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1)
-
- return x
- return x
-
- if self.model_mean_type == ModelMeanType.PREVIOUS_X:
- pred_xstart = process_xstart(
- self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
- )
- model_mean = model_output
- elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
- if self.model_mean_type == ModelMeanType.START_X:
- pred_xstart = process_xstart(model_output)
- else:
- pred_xstart = process_xstart(
- self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
- )
- model_mean, _, _ = self.q_posterior_mean_variance(
- x_start=pred_xstart, x_t=x, t=t
- )
- else:
- raise NotImplementedError(self.model_mean_type)
-
- assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)
-
-
- return {
- "mean": model_mean,
- "variance": model_variance,
- "log_variance": model_log_variance,
- "pred_xstart": pred_xstart,
- }
-
- def _predict_xstart_from_eps(self, x_t, t, eps):
- assert x_t.shape == eps.shape
- return (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
- )
-
- def _predict_xstart_from_xprev(self, x_t, t, xprev):
- assert x_t.shape == xprev.shape
- return ( # (xprev - coef2*x_t) / coef1
- _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- - _extract_into_tensor(
- self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
- )
- * x_t
- )
-
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
- if self.mode == 'segmentation':
- x_t = x_t[:, -pred_xstart.shape[1]:, ...]
- assert pred_xstart.shape == x_t.shape
- eps = (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- - pred_xstart
- ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
- return eps
-
- def _scale_timesteps(self, t):
- if self.rescale_timesteps:
- return t.float() * (1000.0 / self.num_timesteps)
- return t
-
- def condition_mean(self, cond_fn, p_mean_var, x, t, update=None, model_kwargs=None):
- """
- Compute the mean for the previous step, given a function cond_fn that
- computes the gradient of a conditional log probability with respect to
- x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
- condition on y.
-
- This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
- """
-
-
- if update is not None:
- print('CONDITION MEAN UPDATE NOT NONE')
-
- new_mean = (
- p_mean_var["mean"].detach().float() + p_mean_var["variance"].detach() * update.float()
- )
- a=update
-
- else:
- a, gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
- new_mean = (
- p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
- )
-
- return a, new_mean
-
-
-
- def condition_score2(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
- """
- Compute what the p_mean_variance output would have been, should the
- model's score function be conditioned by cond_fn.
- See condition_mean() for details on cond_fn.
- Unlike condition_mean(), this instead uses the conditioning strategy
- from Song et al (2020).
- """
- t=t.long()
- alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
-
- eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
- a, cfn= cond_fn(
- x, self._scale_timesteps(t).long(), **model_kwargs
- )
- eps = eps - (1 - alpha_bar).sqrt() * cfn
-
- out = p_mean_var.copy()
- out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
- out["mean"], _, _ = self.q_posterior_mean_variance(
- x_start=out["pred_xstart"], x_t=x, t=t
- )
- return out, cfn
-
- def sample_known(self, img, batch_size = 1):
- image_size = self.image_size
- channels = self.channels
- return self.p_sample_loop_known(model,(batch_size, channels, image_size, image_size), img)
-
-
- def p_sample_loop(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=True,
- ):
- """
- Generate samples from the model.
-
- :param model: the model module.
- :param shape: the shape of the samples, (N, C, H, W).
- :param noise: if specified, the noise from the encoder to sample.
- Should be of the same shape as `shape`.
- :param clip_denoised: if True, clip x_start predictions to [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample.
- :param cond_fn: if not None, this is a gradient function that acts
- similarly to the model.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :param device: if specified, the device to create the samples on.
- If not specified, use a model parameter's device.
- :param progress: if True, show a tqdm progress bar.
- :return: a non-differentiable batch of samples.
- """
- final = None
- for sample in self.p_sample_loop_progressive(
- model,
- shape,
- noise=noise,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- ):
- final = sample
- return final["sample"]
-
- def p_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- ):
- """
- Sample x_{t-1} from the model at the given timestep.
- :param model: the model to sample from.
- :param x: the current tensor at x_{t-1}.
- :param t: the value of t, starting at 0 for the first diffusion step.
- :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample.
- :param cond_fn: if not None, this is a gradient function that acts
- similarly to the model.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :return: a dict containing the following keys:
- - 'sample': a random sample from the model.
- - 'pred_xstart': a prediction of x_0.
- """
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- noise = th.randn_like(x)
- nonzero_mask = (
- (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
- ) # no noise when t == 0
- if cond_fn is not None:
- out["mean"] = self.condition_mean(
- cond_fn, out, x, t, model_kwargs=model_kwargs
- )
- sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
- return {"sample": sample, "pred_xstart": out["pred_xstart"]}
-
- def p_sample_loop_known(
- self,
- model,
- shape,
- img,
- org=None,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- noise_level=500,
- progress=False,
- classifier=None
- ):
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
-
-
- t = th.randint(499,500, (b,), device=device).long().to(device)
-
- org=img[0].to(device)
- img=img[0].to(device)
- indices = list(range(t))[::-1]
- noise = th.randn_like(img[:, :4, ...]).to(device)
- x_noisy = self.q_sample(x_start=img[:, :4, ...], t=t, noise=noise).to(device)
- x_noisy = torch.cat((x_noisy, img[:, 4:, ...]), dim=1)
-
-
- for sample in self.p_sample_loop_progressive(
- model,
- shape,
- time=noise_level,
- noise=x_noisy,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- org=org,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- classifier=classifier
- ):
- final = sample
-
- return final["sample"], x_noisy, img
-
- def p_sample_loop_interpolation(
- self,
- model,
- shape,
- img1,
- img2,
- lambdaint,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- ):
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
- t = th.randint(299,300, (b,), device=device).long().to(device)
- img1=torch.tensor(img1).to(device)
- img2 = torch.tensor(img2).to(device)
- noise = th.randn_like(img1).to(device)
- x_noisy1 = self.q_sample(x_start=img1, t=t, noise=noise).to(device)
- x_noisy2 = self.q_sample(x_start=img2, t=t, noise=noise).to(device)
- interpol=lambdaint*x_noisy1+(1-lambdaint)*x_noisy2
- for sample in self.p_sample_loop_progressive(
- model,
- shape,
- time=t,
- noise=interpol,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- ):
- final = sample
- return final["sample"], interpol, img1, img2
-
-
- def p_sample_loop_progressive(
- self,
- model,
- shape,
- time=1000,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=True,
- ):
- """
- Generate samples from the model and yield intermediate samples from
- each timestep of diffusion.
-
- Arguments are the same as p_sample_loop().
- Returns a generator over dicts, where each dict is the return value of
- p_sample().
- """
-
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- if noise is not None:
- img = noise
- else:
- img = th.randn(*shape, device=device)
-
- indices = list(range(time))[::-1]
- if progress:
- # Lazy import so that we don't depend on tqdm.
- from tqdm.auto import tqdm
- indices = tqdm(indices)
-
- for i in indices:
- t = th.tensor([i] * shape[0], device=device)
- with th.no_grad():
- out = self.p_sample(
- model,
- img,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- )
- yield out
- img = out["sample"]
-
- def ddim_sample(
- self,
- model,
- x,
- t, # index of current step
- t_cpu=None,
- t_prev=None, # index of step that we are going to compute, only used for heun
- t_prev_cpu=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- eta=0.0,
- sampling_steps=0,
- ):
- """
- Sample x_{t-1} from the model using DDIM.
- Same usage as p_sample().
- """
- relerr = lambda x, y: (x-y).abs().sum() / y.abs().sum()
- if cond_fn is not None:
- out, saliency = self.condition_score2(cond_fn, out, x, t, model_kwargs=model_kwargs)
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- eps_orig = self._predict_eps_from_xstart(x_t=x, t=t, pred_xstart=out["pred_xstart"])
- if self.mode == 'default':
- shape = x.shape
- elif self.mode == 'segmentation':
- shape = eps_orig.shape
- else:
- raise NotImplementedError(f'mode "{self.mode}" not implemented')
-
- if not sampling_steps:
- alpha_bar_orig = _extract_into_tensor(self.alphas_cumprod, t, shape)
- alpha_bar_prev_orig = _extract_into_tensor(self.alphas_cumprod_prev, t, shape)
- else:
- xp = np.arange(0, 1000, 1, dtype=np.float)
- alpha_cumprod_fun = interp1d(xp, self.alphas_cumprod,
- bounds_error=False,
- fill_value=(self.alphas_cumprod[0], self.alphas_cumprod[-1]),
- )
- alpha_bar_orig = alpha_cumprod_fun(t_cpu).item()
- alpha_bar_prev_orig = alpha_cumprod_fun(t_prev_cpu).item()
- sigma = (
- eta
- * ((1 - alpha_bar_prev_orig) / (1 - alpha_bar_orig))**.5
- * (1 - alpha_bar_orig / alpha_bar_prev_orig)**.5
- )
- noise = th.randn(size=shape, device=x.device)
- mean_pred = (
- out["pred_xstart"] * alpha_bar_prev_orig**.5
- + (1 - alpha_bar_prev_orig - sigma ** 2)**.5 * eps_orig
- )
- nonzero_mask = (
- (t != 0).float().view(-1, *([1] * (len(shape) - 1)))
- )
- sample = mean_pred + nonzero_mask * sigma * noise
- return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
-
-
- def ddim_reverse_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- model_kwargs=None,
- eta=0.0,
- ):
- """
- Sample x_{t+1} from the model using DDIM reverse ODE.
- """
- assert eta == 0.0, "Reverse ODE only for deterministic path"
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- # Usually our model outputs epsilon, but we re-derive it
- # in case we used x_start or x_prev prediction.
- eps = (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- - out["pred_xstart"]
- ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
- alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
-
- # Equation 12. reversed
- mean_pred = (
- out["pred_xstart"] * th.sqrt(alpha_bar_next)
- + th.sqrt(1 - alpha_bar_next) * eps
- )
-
- return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
-
-
-
- def ddim_sample_loop_interpolation(
- self,
- model,
- shape,
- img1,
- img2,
- lambdaint,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- ):
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
- t = th.randint(199,200, (b,), device=device).long().to(device)
- img1=torch.tensor(img1).to(device)
- img2 = torch.tensor(img2).to(device)
- noise = th.randn_like(img1).to(device)
- x_noisy1 = self.q_sample(x_start=img1, t=t, noise=noise).to(device)
- x_noisy2 = self.q_sample(x_start=img2, t=t, noise=noise).to(device)
- interpol=lambdaint*x_noisy1+(1-lambdaint)*x_noisy2
- for sample in self.ddim_sample_loop_progressive(
- model,
- shape,
- time=t,
- noise=interpol,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- ):
- final = sample
- return final["sample"], interpol, img1, img2
-
- def ddim_sample_loop(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- eta=0.0,
- sampling_steps=0,
- ):
- """
- Generate samples from the model using DDIM.
-
- Same usage as p_sample_loop().
- """
- final = None
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
- #t = th.randint(0,1, (b,), device=device).long().to(device)
- t = 1000
- for sample in self.ddim_sample_loop_progressive(
- model,
- shape,
- time=t,
- noise=noise,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- eta=eta,
- sampling_steps=sampling_steps,
- ):
-
- final = sample
- return final["sample"]
-
-
-
- def ddim_sample_loop_known(
- self,
- model,
- shape,
- img,
- mode='default',
- org=None,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- noise_level=1000, # must be same as in training
- progress=False,
- conditioning=False,
- conditioner=None,
- classifier=None,
- eta=0.0,
- sampling_steps=0,
- ):
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
- t = th.randint(0,1, (b,), device=device).long().to(device)
- img = img.to(device)
-
- indices = list(range(t))[::-1]
- if mode == 'segmentation':
- noise = None
- x_noisy = None
- elif mode == 'default':
- noise = None
- x_noisy = None
- else:
- raise NotImplementedError(f'mode "{mode}" not implemented')
-
- final = None
- # pass images to be segmented as condition
- for sample in self.ddim_sample_loop_progressive(
- model,
- shape,
- segmentation_img=img, # image to be segmented
- time=noise_level,
- noise=x_noisy,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- eta=eta,
- sampling_steps=sampling_steps,
- ):
- final = sample
-
- return final["sample"], x_noisy, img
-
-
- def ddim_sample_loop_progressive(
- self,
- model,
- shape,
- segmentation_img=None, # define to perform segmentation
- time=1000,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- eta=0.0,
- sampling_steps=0,
- ):
- """
- Use DDIM to sample from the model and yield intermediate samples from
- each timestep of DDIM.
-
- Same usage as p_sample_loop_progressive().
- """
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- if noise is not None:
- img = noise
- else:
- if segmentation_img is None: # normal sampling
- img = th.randn(*shape, device=device)
- else: # segmentation mode
- label_shape = (segmentation_img.shape[0], model.out_channels, *segmentation_img.shape[2:])
- img = th.randn(label_shape, dtype=segmentation_img.dtype, device=segmentation_img.device)
-
- indices = list(range(time))[::-1] # klappt nur für batch_size == 1
-
-
- if sampling_steps:
- tmp = np.linspace(999, 0, sampling_steps)
- tmp = np.append(tmp, -tmp[-2])
- indices = tmp[:-1].round().astype(np.int)
- indices_prev = tmp[1:].round().astype(np.int)
- else:
- indices_prev = [i-1 for i in indices]
-
- if True: #progress:
- # Lazy import so that we don't depend on tqdm.
- from tqdm.auto import tqdm
-
- indices = tqdm(indices)
-
- for i, i_prev in zip(indices, indices_prev): # 1000 -> 0
- if segmentation_img is not None:
- prev_img = img
- img = th.cat((segmentation_img, img), dim=1)
- t = th.tensor([i] * shape[0], device=device)
- t_prev = th.tensor([i_prev] * shape[0], device=device)
- with th.no_grad():
- out = self.ddim_sample(
- model,
- img,
- t,
- t_cpu=i,
- t_prev=t_prev,
- t_prev_cpu=i_prev,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- eta=eta,
- sampling_steps=sampling_steps,
- )
- yield out
- img = out["sample"]
-
- def _vb_terms_bpd(
- self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
- ):
- """
- Get a term for the variational lower-bound.
-
- The resulting units are bits (rather than nats, as one might expect).
- This allows for comparison to other papers.
-
- :return: a dict with the following keys:
- - 'output': a shape [N] tensor of NLLs or KLs.
- - 'pred_xstart': the x_0 predictions.
- """
- true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
- x_start=x_start, x_t=x_t, t=t
- )
- out = self.p_mean_variance(
- model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
- )
- kl = normal_kl(
- true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
- )
- kl = mean_flat(kl) / np.log(2.0)
-
- decoder_nll = -discretized_gaussian_log_likelihood(
- x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
- )
- assert decoder_nll.shape == x_start.shape
- decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
-
- # At the first timestep return the decoder NLL,
- # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
- output = th.where((t == 0), decoder_nll, kl)
- return {"output": output, "pred_xstart": out["pred_xstart"]}
-
- def training_losses(self, model, x_start, t, classifier=None, model_kwargs=None, noise=None, labels=None,
- mode='default'):
- """
- Compute training losses for a single timestep.
- :param model: the model to evaluate loss on.
- :param x_start: the [N x C x ...] tensor of inputs - original image resolution.
- :param t: a batch of timestep indices.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :param noise: if specified, the specific Gaussian noise to try to remove.
- :param labels: must be specified for mode='segmentation'
- :param mode: can be default (image generation), segmentation
- :return: a dict with the key "loss" containing a tensor of shape [N].
- Some mean or variance settings may also have other keys.
- """
- if model_kwargs is None:
- model_kwargs = {}
-
- # Wavelet transform the input image
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(x_start)
- x_start_dwt = th.cat([LLL / 3., LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1)
-
- if mode == 'default':
- noise = th.randn_like(x_start) # Sample noise - original image resolution.
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(noise)
- noise_dwt = th.cat([LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1) # Wavelet transformed noise
- x_t = self.q_sample(x_start_dwt, t, noise=noise_dwt) # Sample x_t
-
- else:
- raise ValueError(f'Invalid mode {mode=}, needs to be "default"')
-
- model_output = model(x_t, self._scale_timesteps(t), **model_kwargs) # Model outputs denoised wavelet subbands
-
- # Inverse wavelet transform the model output
- B, _, H, W, D = model_output.size()
- model_output_idwt = idwt(model_output[:, 0, :, :, :].view(B, 1, H, W, D) * 3.,
- model_output[:, 1, :, :, :].view(B, 1, H, W, D),
- model_output[:, 2, :, :, :].view(B, 1, H, W, D),
- model_output[:, 3, :, :, :].view(B, 1, H, W, D),
- model_output[:, 4, :, :, :].view(B, 1, H, W, D),
- model_output[:, 5, :, :, :].view(B, 1, H, W, D),
- model_output[:, 6, :, :, :].view(B, 1, H, W, D),
- model_output[:, 7, :, :, :].view(B, 1, H, W, D))
-
- terms = {"mse_wav": th.mean(mean_flat((x_start_dwt - model_output) ** 2), dim=0)}
-
- return terms, model_output, model_output_idwt
-
-
- def _prior_bpd(self, x_start):
- """
- Get the prior KL term for the variational lower-bound, measured in
- bits-per-dim.
-
- This term can't be optimized, as it only depends on the encoder.
-
- :param x_start: the [N x C x ...] tensor of inputs.
- :return: a batch of [N] KL values (in bits), one per batch element.
- """
- batch_size = x_start.shape[0]
- t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
- kl_prior = normal_kl(
- mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
- )
- return mean_flat(kl_prior) / np.log(2.0)
-
- def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
- """
- Compute the entire variational lower-bound, measured in bits-per-dim,
- as well as other related quantities.
-
- :param model: the model to evaluate loss on.
- :param x_start: the [N x C x ...] tensor of inputs.
- :param clip_denoised: if True, clip denoised samples.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
-
- :return: a dict containing the following keys:
- - total_bpd: the total variational lower-bound, per batch element.
- - prior_bpd: the prior term in the lower-bound.
- - vb: an [N x T] tensor of terms in the lower-bound.
- - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- - mse: an [N x T] tensor of epsilon MSEs for each timestep.
- """
- device = x_start.device
- batch_size = x_start.shape[0]
-
- vb = []
- xstart_mse = []
- mse = []
- for t in list(range(self.num_timesteps))[::-1]:
- t_batch = th.tensor([t] * batch_size, device=device)
- noise = th.randn_like(x_start)
- x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
-
- # Calculate VLB term at the current timestep
- with th.no_grad():
- out = self._vb_terms_bptimestepsd(
- model,
- x_start=x_start,
- x_t=x_t,
- t=t_batch,
- clip_denoised=clip_denoised,
- model_kwargs=model_kwargs,
- )
- vb.append(out["output"])
- xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
- eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
- mse.append(mean_flat((eps - noise) ** 2))
-
- vb = th.stack(vb, dim=1)
- xstart_mse = th.stack(xstart_mse, dim=1)
- mse = th.stack(mse, dim=1)
-
- prior_bpd = self._prior_bpd(x_start)
- total_bpd = vb.sum(dim=1) + prior_bpd
- return {
- "total_bpd": total_bpd,
- "prior_bpd": prior_bpd,
- "vb": vb,
- "xstart_mse": xstart_mse,
- "mse": mse,
- }
-
-
-def _extract_into_tensor(arr, timesteps, broadcast_shape):
- """
- Extract values from a 1-D numpy array for a batch of indices.
-
- :param arr: the 1-D numpy array.
- :param timesteps: a tensor of indices into the array to extract.
- :param broadcast_shape: a larger shape of K dimensions with the batch
- dimension equal to the length of timesteps.
- :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
- """
- res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
- while len(res.shape) < len(broadcast_shape):
- res = res[..., None]
- return res.expand(broadcast_shape)
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/inpaintloader-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/inpaintloader-checkpoint.py
deleted file mode 100644
index db8e01e8ae99966471431e1fbef9039781688bce..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/inpaintloader-checkpoint.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import os, nibabel, torch, numpy as np
-import torch.nn as nn
-from torch.utils.data import Dataset
-import pandas as pd
-import re
-
-
-class InpaintVolumes(Dataset):
- """
- Y : float32 [C, D, H, W] full multi-modal MRI stack
- M : float32 [1, D, H, W] binary in-painting mask (shared by all mods)
- Y_void = Y * (1 - M) context with lesion region blanked
- name : identifier string
- """
-
- # ------------------------------------------------------------
- def __init__(self,
- root_dir: str,
- subset: str = 'train', # 'train' | 'val'
- img_size: int = 256, # 128 or 256 cube
- modalities: tuple = ('T1w',), # order defines channel order
- normalize=None):
- super().__init__()
- self.root_dir = os.path.expanduser(root_dir)
- self.subset = subset
- self.img_size = img_size
- self.modalities = modalities
- self.normalize = normalize or (lambda x: x)
- self.cases = self._index_cases() # ⇒ list[dict]
-
- # ------------------------------------------------------------
- def _index_cases(self):
- """
- Build a list like:
- {'img': {'T1w': path, 'FLAIR': path, ...},
- 'mask': path,
- 'name': case_id}
- Edit only this block to suit your folder / filename scheme.
- """
- cases = []
-
- # metadata
- df = pd.read_csv(f"{self.root_dir}/participants.tsv", sep="\t")
- # update with new splits
-
- # filter FCD samples
- fcd_df = df[df['group'] == 'fcd'].copy()
- # shuffle indices
- fcd_df = fcd_df.sample(frac=1, random_state=42).reset_index(drop=True)
- # compute split index
- n_train = int(len(fcd_df) * 0.9)
- # assign split labels
- fcd_df.loc[:n_train-1, 'split'] = 'train'
- fcd_df.loc[n_train:, 'split'] = 'val'
- #update
- df.loc[fcd_df.index, 'split'] = fcd_df['split']
-
- missing = []
-
- for participant_id in df[(df['split']==self.subset) & (df['group']=='fcd')]['participant_id']:
- case_dir = f"{self.root_dir}/{participant_id}/anat/"
- files = os.listdir(case_dir)
-
- img_dict = {}
- for mod in self.modalities:
- pattern = re.compile(rf"^{re.escape(participant_id)}.*{re.escape(mod)}\.nii\.gz$")
- matches = [f for f in files if pattern.match(f)]
- assert matches, f"Missing {mod} for {participant_id} in {case_dir}"
- img_dict[mod] = os.path.join(case_dir, matches[0])
-
- mask_matches = [f for f in files if re.match(rf"^{re.escape(participant_id)}.*roi\.nii\.gz$", f)]
- mask_path = os.path.join(case_dir, mask_matches[0])
-
- cases.append({'img': img_dict, 'mask': mask_path, 'name': participant_id})
-
- return cases
-
- # ------------------------------------------------------------
- def _pad_to_cube(self, vol, fill=0.0):
- """Symmetric 3-D pad to [img_size³]. `vol` is [*, D, H, W]."""
- D, H, W = vol.shape[-3:]
- pad_D, pad_H, pad_W = self.img_size - D, self.img_size - H, self.img_size - W
- pad = (pad_W // 2, pad_W - pad_W // 2,
- pad_H // 2, pad_H - pad_H // 2,
- pad_D // 2, pad_D - pad_D // 2)
- return nn.functional.pad(vol, pad, value=fill)
-
- # ------------------------------------------------------------
- def __getitem__(self, idx):
- rec = self.cases[idx]
- name = rec['name']
-
- # ---------- load C modalities --------------------------
- vols = []
- for mod in self.modalities:
- mod_path = rec['img'][mod]
- arr = nibabel.load(mod_path).get_fdata().astype(np.float32)
-
- # robust min-max clipping and normalization
- lo, hi = np.quantile(arr, [0.001, 0.999])
- arr = np.clip(arr, lo, hi)
- arr = (arr - lo) / (hi - lo + 1e-6)
-
- vols.append(torch.from_numpy(arr))
-
- first_mod = self.modalities[0]
- nii_obj = nibabel.load(rec['img'][first_mod])
- affine = nii_obj.affine
-
- Y = torch.stack(vols, dim=0) # [C, D, H, W]
-
- # ---------- load mask ----------------------------------
- M_arr = nibabel.load(rec['mask']).get_fdata().astype(np.uint8)
- M = torch.from_numpy(M_arr).unsqueeze(0) # [1, D, H, W]
- M = (M > 0).to(Y.dtype)
-
- # ---------- pad (and optional downsample) --------------
- Y = self._pad_to_cube(Y, fill=0.0)
- M = self._pad_to_cube(M, fill=0.0)
- if self.img_size == 128:
- pool = nn.AvgPool3d(2, 2)
- Y = pool(Y); M = pool(M)
-
- # ---------- derive context image -----------------------
- Y_void = Y * (1 - M)
-
- return Y, M, Y_void, name, affine # shapes: [C, D, H, W], [1, D, H, W], [C, D, H, W], ...
-
- # ------------------------------------------------------------
- def __len__(self):
- return len(self.cases)
\ No newline at end of file
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/lidcloader-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/lidcloader-checkpoint.py
deleted file mode 100644
index 294278919e92bb8df59cc1b4744505c2164cd37f..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/lidcloader-checkpoint.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.utils.data
-import os
-import os.path
-import nibabel
-
-
-class LIDCVolumes(torch.utils.data.Dataset):
- def __init__(self, directory, test_flag=False, normalize=None, mode='train', img_size=256):
- '''
- directory is expected to contain some folder structure:
- if some subfolder contains only files, all of these
- files are assumed to have the name: processed.nii.gz
- '''
- super().__init__()
- self.mode = mode
- self.directory = os.path.expanduser(directory)
- self.normalize = normalize or (lambda x: x)
- self.test_flag = test_flag
- self.img_size = img_size
- self.database = []
-
- if not self.mode == 'fake':
- for root, dirs, files in os.walk(self.directory):
- # if there are no subdirs, we have a datadir
- if not dirs:
- files.sort()
- datapoint = dict()
- # extract all files as channels
- for f in files:
- datapoint['image'] = os.path.join(root, f)
- if len(datapoint) != 0:
- self.database.append(datapoint)
- else:
- for root, dirs, files in os.walk(self.directory):
- for f in files:
- datapoint = dict()
- datapoint['image'] = os.path.join(root, f)
- self.database.append(datapoint)
-
- def __getitem__(self, x):
- filedict = self.database[x]
- name = filedict['image']
- nib_img = nibabel.load(name)
- out = nib_img.get_fdata()
-
- if not self.mode == 'fake':
- out = torch.Tensor(out)
-
- image = torch.zeros(1, 256, 256, 256)
- image[:, :, :, :] = out
-
- if self.img_size == 128:
- downsample = nn.AvgPool3d(kernel_size=2, stride=2)
- image = downsample(image)
- else:
- image = torch.tensor(out, dtype=torch.float32)
- image = image.unsqueeze(dim=0)
-
- # normalization
- image = self.normalize(image)
-
- if self.mode == 'fake':
- return image, name
- else:
- return image
-
- def __len__(self):
- return len(self.database)
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/logger-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/logger-checkpoint.py
deleted file mode 100644
index 880e9b881716a811cc657a27ac498ea7f0ea83dd..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/logger-checkpoint.py
+++ /dev/null
@@ -1,495 +0,0 @@
-"""
-Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
-https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
-"""
-
-import os
-import sys
-import shutil
-import os.path as osp
-import json
-import time
-import datetime
-import tempfile
-import warnings
-from collections import defaultdict
-from contextlib import contextmanager
-
-DEBUG = 10
-INFO = 20
-WARN = 30
-ERROR = 40
-
-DISABLED = 50
-
-
-class KVWriter(object):
- def writekvs(self, kvs):
- raise NotImplementedError
-
-
-class SeqWriter(object):
- def writeseq(self, seq):
- raise NotImplementedError
-
-
-class HumanOutputFormat(KVWriter, SeqWriter):
- def __init__(self, filename_or_file):
- if isinstance(filename_or_file, str):
- self.file = open(filename_or_file, "wt")
- self.own_file = True
- else:
- assert hasattr(filename_or_file, "read"), (
- "expected file or str, got %s" % filename_or_file
- )
- self.file = filename_or_file
- self.own_file = False
-
- def writekvs(self, kvs):
- # Create strings for printing
- key2str = {}
- for (key, val) in sorted(kvs.items()):
- if hasattr(val, "__float__"):
- valstr = "%-8.3g" % val
- else:
- valstr = str(val)
- key2str[self._truncate(key)] = self._truncate(valstr)
-
- # Find max widths
- if len(key2str) == 0:
- print("WARNING: tried to write empty key-value dict")
- return
- else:
- keywidth = max(map(len, key2str.keys()))
- valwidth = max(map(len, key2str.values()))
-
- # Write out the data
- dashes = "-" * (keywidth + valwidth + 7)
- lines = [dashes]
- for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
- lines.append(
- "| %s%s | %s%s |"
- % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
- )
- lines.append(dashes)
- self.file.write("\n".join(lines) + "\n")
-
- # Flush the output to the file
- self.file.flush()
-
- def _truncate(self, s):
- maxlen = 30
- return s[: maxlen - 3] + "..." if len(s) > maxlen else s
-
- def writeseq(self, seq):
- seq = list(seq)
- for (i, elem) in enumerate(seq):
- self.file.write(elem)
- if i < len(seq) - 1: # add space unless this is the last one
- self.file.write(" ")
- self.file.write("\n")
- self.file.flush()
-
- def close(self):
- if self.own_file:
- self.file.close()
-
-
-class JSONOutputFormat(KVWriter):
- def __init__(self, filename):
- self.file = open(filename, "wt")
-
- def writekvs(self, kvs):
- for k, v in sorted(kvs.items()):
- if hasattr(v, "dtype"):
- kvs[k] = float(v)
- self.file.write(json.dumps(kvs) + "\n")
- self.file.flush()
-
- def close(self):
- self.file.close()
-
-
-class CSVOutputFormat(KVWriter):
- def __init__(self, filename):
- self.file = open(filename, "w+t")
- self.keys = []
- self.sep = ","
-
- def writekvs(self, kvs):
- # Add our current row to the history
- extra_keys = list(kvs.keys() - self.keys)
- extra_keys.sort()
- if extra_keys:
- self.keys.extend(extra_keys)
- self.file.seek(0)
- lines = self.file.readlines()
- self.file.seek(0)
- for (i, k) in enumerate(self.keys):
- if i > 0:
- self.file.write(",")
- self.file.write(k)
- self.file.write("\n")
- for line in lines[1:]:
- self.file.write(line[:-1])
- self.file.write(self.sep * len(extra_keys))
- self.file.write("\n")
- for (i, k) in enumerate(self.keys):
- if i > 0:
- self.file.write(",")
- v = kvs.get(k)
- if v is not None:
- self.file.write(str(v))
- self.file.write("\n")
- self.file.flush()
-
- def close(self):
- self.file.close()
-
-
-class TensorBoardOutputFormat(KVWriter):
- """
- Dumps key/value pairs into TensorBoard's numeric format.
- """
-
- def __init__(self, dir):
- os.makedirs(dir, exist_ok=True)
- self.dir = dir
- self.step = 1
- prefix = "events"
- path = osp.join(osp.abspath(dir), prefix)
- import tensorflow as tf
- from tensorflow.python import pywrap_tensorflow
- from tensorflow.core.util import event_pb2
- from tensorflow.python.util import compat
-
- self.tf = tf
- self.event_pb2 = event_pb2
- self.pywrap_tensorflow = pywrap_tensorflow
- self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
-
- def writekvs(self, kvs):
- def summary_val(k, v):
- kwargs = {"tag": k, "simple_value": float(v)}
- return self.tf.Summary.Value(**kwargs)
-
- summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
- event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
- event.step = (
- self.step
- ) # is there any reason why you'd want to specify the step?
- self.writer.WriteEvent(event)
- self.writer.Flush()
- self.step += 1
-
- def close(self):
- if self.writer:
- self.writer.Close()
- self.writer = None
-
-
-def make_output_format(format, ev_dir, log_suffix=""):
- os.makedirs(ev_dir, exist_ok=True)
- if format == "stdout":
- return HumanOutputFormat(sys.stdout)
- elif format == "log":
- return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
- elif format == "json":
- return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
- elif format == "csv":
- return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
- elif format == "tensorboard":
- return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
- else:
- raise ValueError("Unknown format specified: %s" % (format,))
-
-
-# ================================================================
-# API
-# ================================================================
-
-
-def logkv(key, val):
- """
- Log a value of some diagnostic
- Call this once for each diagnostic quantity, each iteration
- If called many times, last value will be used.
- """
- get_current().logkv(key, val)
-
-
-def logkv_mean(key, val):
- """
- The same as logkv(), but if called many times, values averaged.
- """
- get_current().logkv_mean(key, val)
-
-
-def logkvs(d):
- """
- Log a dictionary of key-value pairs
- """
- for (k, v) in d.items():
- logkv(k, v)
-
-
-def dumpkvs():
- """
- Write all of the diagnostics from the current iteration
- """
- return get_current().dumpkvs()
-
-
-def getkvs():
- return get_current().name2val
-
-
-def log(*args, level=INFO):
- """
- Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
- """
- get_current().log(*args, level=level)
-
-
-def debug(*args):
- log(*args, level=DEBUG)
-
-
-def info(*args):
- log(*args, level=INFO)
-
-
-def warn(*args):
- log(*args, level=WARN)
-
-
-def error(*args):
- log(*args, level=ERROR)
-
-
-def set_level(level):
- """
- Set logging threshold on current logger.
- """
- get_current().set_level(level)
-
-
-def set_comm(comm):
- get_current().set_comm(comm)
-
-
-def get_dir():
- """
- Get directory that log files are being written to.
- will be None if there is no output directory (i.e., if you didn't call start)
- """
- return get_current().get_dir()
-
-
-record_tabular = logkv
-dump_tabular = dumpkvs
-
-
-@contextmanager
-def profile_kv(scopename):
- logkey = "wait_" + scopename
- tstart = time.time()
- try:
- yield
- finally:
- get_current().name2val[logkey] += time.time() - tstart
-
-
-def profile(n):
- """
- Usage:
- @profile("my_func")
- def my_func(): code
- """
-
- def decorator_with_name(func):
- def func_wrapper(*args, **kwargs):
- with profile_kv(n):
- return func(*args, **kwargs)
-
- return func_wrapper
-
- return decorator_with_name
-
-
-# ================================================================
-# Backend
-# ================================================================
-
-
-def get_current():
- if Logger.CURRENT is None:
- _configure_default_logger()
-
- return Logger.CURRENT
-
-
-class Logger(object):
- DEFAULT = None # A logger with no output files. (See right below class definition)
- # So that you can still log to the terminal without setting up any output files
- CURRENT = None # Current logger being used by the free functions above
-
- def __init__(self, dir, output_formats, comm=None):
- self.name2val = defaultdict(float) # values this iteration
- self.name2cnt = defaultdict(int)
- self.level = INFO
- self.dir = dir
- self.output_formats = output_formats
- self.comm = comm
-
- # Logging API, forwarded
- # ----------------------------------------
- def logkv(self, key, val):
- self.name2val[key] = val
-
- def logkv_mean(self, key, val):
- oldval, cnt = self.name2val[key], self.name2cnt[key]
- self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
- self.name2cnt[key] = cnt + 1
-
- def dumpkvs(self):
- if self.comm is None:
- d = self.name2val
- else:
- d = mpi_weighted_mean(
- self.comm,
- {
- name: (val, self.name2cnt.get(name, 1))
- for (name, val) in self.name2val.items()
- },
- )
- if self.comm.rank != 0:
- d["dummy"] = 1 # so we don't get a warning about empty dict
- out = d.copy() # Return the dict for unit testing purposes
- for fmt in self.output_formats:
- if isinstance(fmt, KVWriter):
- fmt.writekvs(d)
- self.name2val.clear()
- self.name2cnt.clear()
- return out
-
- def log(self, *args, level=INFO):
- if self.level <= level:
- self._do_log(args)
-
- # Configuration
- # ----------------------------------------
- def set_level(self, level):
- self.level = level
-
- def set_comm(self, comm):
- self.comm = comm
-
- def get_dir(self):
- return self.dir
-
- def close(self):
- for fmt in self.output_formats:
- fmt.close()
-
- # Misc
- # ----------------------------------------
- def _do_log(self, args):
- for fmt in self.output_formats:
- if isinstance(fmt, SeqWriter):
- fmt.writeseq(map(str, args))
-
-
-def get_rank_without_mpi_import():
- # check environment variables here instead of importing mpi4py
- # to avoid calling MPI_Init() when this module is imported
- for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
- if varname in os.environ:
- return int(os.environ[varname])
- return 0
-
-
-def mpi_weighted_mean(comm, local_name2valcount):
- """
- Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
- Perform a weighted average over dicts that are each on a different node
- Input: local_name2valcount: dict mapping key -> (value, count)
- Returns: key -> mean
- """
- all_name2valcount = comm.gather(local_name2valcount)
- if comm.rank == 0:
- name2sum = defaultdict(float)
- name2count = defaultdict(float)
- for n2vc in all_name2valcount:
- for (name, (val, count)) in n2vc.items():
- try:
- val = float(val)
- except ValueError:
- if comm.rank == 0:
- warnings.warn(
- "WARNING: tried to compute mean on non-float {}={}".format(
- name, val
- )
- )
- else:
- name2sum[name] += val * count
- name2count[name] += count
- return {name: name2sum[name] / name2count[name] for name in name2sum}
- else:
- return {}
-
-
-def configure(dir='./results', format_strs=None, comm=None, log_suffix=""):
- """
- If comm is provided, average all numerical stats across that comm
- """
- if dir is None:
- dir = os.getenv("OPENAI_LOGDIR")
- if dir is None:
- dir = osp.join(
- tempfile.gettempdir(),
- datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
- )
- assert isinstance(dir, str)
- dir = os.path.expanduser(dir)
- os.makedirs(os.path.expanduser(dir), exist_ok=True)
-
- rank = get_rank_without_mpi_import()
- if rank > 0:
- log_suffix = log_suffix + "-rank%03i" % rank
-
- if format_strs is None:
- if rank == 0:
- format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
- else:
- format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
- format_strs = filter(None, format_strs)
- output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
-
- Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
- if output_formats:
- log("Logging to %s" % dir)
-
-
-def _configure_default_logger():
- configure()
- Logger.DEFAULT = Logger.CURRENT
-
-
-def reset():
- if Logger.CURRENT is not Logger.DEFAULT:
- Logger.CURRENT.close()
- Logger.CURRENT = Logger.DEFAULT
- log("Reset logger")
-
-
-@contextmanager
-def scoped_configure(dir=None, format_strs=None, comm=None):
- prevlogger = Logger.CURRENT
- configure(dir=dir, format_strs=format_strs, comm=comm)
- try:
- yield
- finally:
- Logger.CURRENT.close()
- Logger.CURRENT = prevlogger
-
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/losses-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/losses-checkpoint.py
deleted file mode 100644
index 251e42e4f36a31bb5e1aeda874b3a45d722000a2..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/losses-checkpoint.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-Helpers for various likelihood-based losses. These are ported from the original
-Ho et al. diffusion models codebase:
-https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
-"""
-
-import numpy as np
-
-import torch as th
-
-
-def normal_kl(mean1, logvar1, mean2, logvar2):
- """
- Compute the KL divergence between two gaussians.
-
- Shapes are automatically broadcasted, so batches can be compared to
- scalars, among other use cases.
- """
- tensor = None
- for obj in (mean1, logvar1, mean2, logvar2):
- if isinstance(obj, th.Tensor):
- tensor = obj
- break
- assert tensor is not None, "at least one argument must be a Tensor"
-
- # Force variances to be Tensors. Broadcasting helps convert scalars to
- # Tensors, but it does not work for th.exp().
- logvar1, logvar2 = [
- x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
- for x in (logvar1, logvar2)
- ]
-
- return 0.5 * (
- -1.0
- + logvar2
- - logvar1
- + th.exp(logvar1 - logvar2)
- + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
- )
-
-
-def approx_standard_normal_cdf(x):
- """
- A fast approximation of the cumulative distribution function of the
- standard normal.
- """
- return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
-
-
-def discretized_gaussian_log_likelihood(x, *, means, log_scales):
- """
- Compute the log-likelihood of a Gaussian distribution discretizing to a
- given image.
-
- :param x: the target images. It is assumed that this was uint8 values,
- rescaled to the range [-1, 1].
- :param means: the Gaussian mean Tensor.
- :param log_scales: the Gaussian log stddev Tensor.
- :return: a tensor like x of log probabilities (in nats).
- """
- assert x.shape == means.shape == log_scales.shape
- centered_x = x - means
- inv_stdv = th.exp(-log_scales)
- plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
- cdf_plus = approx_standard_normal_cdf(plus_in)
- min_in = inv_stdv * (centered_x - 1.0 / 255.0)
- cdf_min = approx_standard_normal_cdf(min_in)
- log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
- log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
- cdf_delta = cdf_plus - cdf_min
- log_probs = th.where(
- x < -0.999,
- log_cdf_plus,
- th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
- )
- assert log_probs.shape == x.shape
- return log_probs
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/nn-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/nn-checkpoint.py
deleted file mode 100644
index 58c287a4acb0d2d6018827130f71214f21cfd96d..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/nn-checkpoint.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""
-Various utilities for neural networks.
-"""
-
-import math
-
-import torch as th
-import torch.nn as nn
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
- def forward(self, x):
- return x * th.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
- def forward(self, x):
- return super().forward(x.float()).type(x.dtype)
-
-
-def conv_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D convolution module.
- """
- if dims == 1:
- return nn.Conv1d(*args, **kwargs)
- elif dims == 2:
- return nn.Conv2d(*args, **kwargs)
- elif dims == 3:
- return nn.Conv3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
- """
- Create a linear module.
- """
- return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D average pooling module.
- """
- if dims == 1:
- return nn.AvgPool1d(*args, **kwargs)
- elif dims == 2:
- return nn.AvgPool2d(*args, **kwargs)
- elif dims == 3:
- return nn.AvgPool3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def update_ema(target_params, source_params, rate=0.99):
- """
- Update target parameters to be closer to those of source parameters using
- an exponential moving average.
-
- :param target_params: the target parameter sequence.
- :param source_params: the source parameter sequence.
- :param rate: the EMA rate (closer to 1 means slower).
- """
- for targ, src in zip(target_params, source_params):
- targ.detach().mul_(rate).add_(src, alpha=1 - rate)
-
-
-def zero_module(module):
- """
- Zero out the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().zero_()
- return module
-
-
-def scale_module(module, scale):
- """
- Scale the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().mul_(scale)
- return module
-
-
-def mean_flat(tensor):
- """
- Take the mean over all non-batch dimensions.
- """
- return tensor.mean(dim=list(range(2, len(tensor.shape))))
-
-
-def normalization(channels, groups=32):
- """
- Make a standard normalization layer.
-
- :param channels: number of input channels.
- :return: an nn.Module for normalization.
- """
- return GroupNorm32(groups, channels)
-
-
-def timestep_embedding(timesteps, dim, max_period=10000):
- """
- Create sinusoidal timestep embeddings.
-
- :param timesteps: a 1-D Tensor of N indices, one per batch element.
- These may be fractional.
- :param dim: the dimension of the output.
- :param max_period: controls the minimum frequency of the embeddings.
- :return: an [N x dim] Tensor of positional embeddings.
- """
- half = dim // 2
- freqs = th.exp(
- -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
- ).to(device=timesteps.device)
- args = timesteps[:, None].float() * freqs[None]
- embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
- if dim % 2:
- embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
- return embedding
-
-
-def checkpoint(func, inputs, params, flag):
- """
- Evaluate a function without caching intermediate activations, allowing for
- reduced memory at the expense of extra compute in the backward pass.
-
- :param func: the function to evaluate.
- :param inputs: the argument sequence to pass to `func`.
- :param params: a sequence of parameters `func` depends on but does not
- explicitly take as arguments.
- :param flag: if False, disable gradient checkpointing.
- """
- if flag:
- args = tuple(inputs) + tuple(params)
- return CheckpointFunction.apply(func, len(inputs), *args)
- else:
- return func(*inputs)
-
-
-class CheckpointFunction(th.autograd.Function):
- @staticmethod
- def forward(ctx, run_function, length, *args):
- ctx.run_function = run_function
- ctx.input_tensors = list(args[:length])
- ctx.input_params = list(args[length:])
- with th.no_grad():
- output_tensors = ctx.run_function(*ctx.input_tensors)
- return output_tensors
-
- @staticmethod
- def backward(ctx, *output_grads):
- ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
- with th.enable_grad():
- # Fixes a bug where the first op in run_function modifies the
- # Tensor storage in place, which is not allowed for detach()'d
- # Tensors.
- shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
- output_tensors = ctx.run_function(*shallow_copies)
- input_grads = th.autograd.grad(
- output_tensors,
- ctx.input_tensors + ctx.input_params,
- output_grads,
- allow_unused=True,
- )
- del ctx.input_tensors
- del ctx.input_params
- del output_tensors
- return (None, None) + input_grads
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/resample-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/resample-checkpoint.py
deleted file mode 100644
index edbeef26f7eec6dbe0158c1a08b404d7de9c5416..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/resample-checkpoint.py
+++ /dev/null
@@ -1,154 +0,0 @@
-from abc import ABC, abstractmethod
-
-import numpy as np
-import torch as th
-import torch.distributed as dist
-
-
-def create_named_schedule_sampler(name, diffusion, maxt):
- """
- Create a ScheduleSampler from a library of pre-defined samplers.
-
- :param name: the name of the sampler.
- :param diffusion: the diffusion object to sample for.
- """
- if name == "uniform":
- return UniformSampler(diffusion, maxt)
- elif name == "loss-second-moment":
- return LossSecondMomentResampler(diffusion)
- else:
- raise NotImplementedError(f"unknown schedule sampler: {name}")
-
-
-class ScheduleSampler(ABC):
- """
- A distribution over timesteps in the diffusion process, intended to reduce
- variance of the objective.
-
- By default, samplers perform unbiased importance sampling, in which the
- objective's mean is unchanged.
- However, subclasses may override sample() to change how the resampled
- terms are reweighted, allowing for actual changes in the objective.
- """
-
- @abstractmethod
- def weights(self):
- """
- Get a numpy array of weights, one per diffusion step.
-
- The weights needn't be normalized, but must be positive.
- """
-
- def sample(self, batch_size, device):
- """
- Importance-sample timesteps for a batch.
-
- :param batch_size: the number of timesteps.
- :param device: the torch device to save to.
- :return: a tuple (timesteps, weights):
- - timesteps: a tensor of timestep indices.
- - weights: a tensor of weights to scale the resulting losses.
- """
- w = self.weights()
- p = w / np.sum(w)
- indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
- indices = th.from_numpy(indices_np).long().to(device)
- weights_np = 1 / (len(p) * p[indices_np])
- weights = th.from_numpy(weights_np).float().to(device)
- return indices, weights
-
-
-class UniformSampler(ScheduleSampler):
- def __init__(self, diffusion, maxt):
- self.diffusion = diffusion
- self._weights = np.ones([maxt])
-
- def weights(self):
- return self._weights
-
-
-class LossAwareSampler(ScheduleSampler):
- def update_with_local_losses(self, local_ts, local_losses):
- """
- Update the reweighting using losses from a model.
-
- Call this method from each rank with a batch of timesteps and the
- corresponding losses for each of those timesteps.
- This method will perform synchronization to make sure all of the ranks
- maintain the exact same reweighting.
-
- :param local_ts: an integer Tensor of timesteps.
- :param local_losses: a 1D Tensor of losses.
- """
- batch_sizes = [
- th.tensor([0], dtype=th.int32, device=local_ts.device)
- for _ in range(dist.get_world_size())
- ]
- dist.all_gather(
- batch_sizes,
- th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
- )
-
- # Pad all_gather batches to be the maximum batch size.
- batch_sizes = [x.item() for x in batch_sizes]
- max_bs = max(batch_sizes)
-
- timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
- loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
- dist.all_gather(timestep_batches, local_ts)
- dist.all_gather(loss_batches, local_losses)
- timesteps = [
- x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
- ]
- losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
- self.update_with_all_losses(timesteps, losses)
-
- @abstractmethod
- def update_with_all_losses(self, ts, losses):
- """
- Update the reweighting using losses from a model.
-
- Sub-classes should override this method to update the reweighting
- using losses from the model.
-
- This method directly updates the reweighting without synchronizing
- between workers. It is called by update_with_local_losses from all
- ranks with identical arguments. Thus, it should have deterministic
- behavior to maintain state across workers.
-
- :param ts: a list of int timesteps.
- :param losses: a list of float losses, one per timestep.
- """
-
-
-class LossSecondMomentResampler(LossAwareSampler):
- def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
- self.diffusion = diffusion
- self.history_per_term = history_per_term
- self.uniform_prob = uniform_prob
- self._loss_history = np.zeros(
- [diffusion.num_timesteps, history_per_term], dtype=np.float64
- )
- self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int)
-
- def weights(self):
- if not self._warmed_up():
- return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
- weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
- weights /= np.sum(weights)
- weights *= 1 - self.uniform_prob
- weights += self.uniform_prob / len(weights)
- return weights
-
- def update_with_all_losses(self, ts, losses):
- for t, loss in zip(ts, losses):
- if self._loss_counts[t] == self.history_per_term:
- # Shift out the oldest loss term.
- self._loss_history[t, :-1] = self._loss_history[t, 1:]
- self._loss_history[t, -1] = loss
- else:
- self._loss_history[t, self._loss_counts[t]] = loss
- self._loss_counts[t] += 1
-
- def _warmed_up(self):
- return (self._loss_counts == self.history_per_term).all()
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/respace-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/respace-checkpoint.py
deleted file mode 100644
index dc2967fa44871275c02063525259929ec6999e8e..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/respace-checkpoint.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import numpy as np
-import torch as th
-
-from .gaussian_diffusion import GaussianDiffusion
-
-
-def space_timesteps(num_timesteps, section_counts):
- """
- Create a list of timesteps to use from an original diffusion process,
- given the number of timesteps we want to take from equally-sized portions
- of the original process.
-
- For example, if there's 300 timesteps and the section counts are [10,15,20]
- then the first 100 timesteps are strided to be 10 timesteps, the second 100
- are strided to be 15 timesteps, and the final 100 are strided to be 20.
-
- If the stride is a string starting with "ddim", then the fixed striding
- from the DDIM paper is used, and only one section is allowed.
-
- :param num_timesteps: the number of diffusion steps in the original
- process to divide up.
- :param section_counts: either a list of numbers, or a string containing
- comma-separated numbers, indicating the step count
- per section. As a special case, use "ddimN" where N
- is a number of steps to use the striding from the
- DDIM paper.
- :return: a set of diffusion steps from the original process to use.
- """
- if isinstance(section_counts, str):
- if section_counts.startswith("ddim"):
- desired_count = int(section_counts[len("ddim") :])
- print('desired_cound', desired_count )
- for i in range(1, num_timesteps):
- if len(range(0, num_timesteps, i)) == desired_count:
- return set(range(0, num_timesteps, i))
- raise ValueError(
- f"cannot create exactly {num_timesteps} steps with an integer stride"
- )
- section_counts = [int(x) for x in section_counts.split(",")]
- # print('sectioncount', section_counts)
- size_per = num_timesteps // len(section_counts)
- extra = num_timesteps % len(section_counts)
- start_idx = 0
- all_steps = []
- for i, section_count in enumerate(section_counts):
- size = size_per + (1 if i < extra else 0)
- if size < section_count:
- raise ValueError(
- f"cannot divide section of {size} steps into {section_count}"
- )
- if section_count <= 1:
- frac_stride = 1
- else:
- frac_stride = (size - 1) / (section_count - 1)
- cur_idx = 0.0
- taken_steps = []
- for _ in range(section_count):
- taken_steps.append(start_idx + round(cur_idx))
- cur_idx += frac_stride
- all_steps += taken_steps
- start_idx += size
- return set(all_steps)
-
-
-class SpacedDiffusion(GaussianDiffusion):
- """
- A diffusion process which can skip steps in a base diffusion process.
-
- :param use_timesteps: a collection (sequence or set) of timesteps from the
- original diffusion process to retain.
- :param kwargs: the kwargs to create the base diffusion process.
- """
-
- def __init__(self, use_timesteps, **kwargs):
- self.use_timesteps = set(use_timesteps)
- self.timestep_map = []
- self.original_num_steps = len(kwargs["betas"])
-
- base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
- last_alpha_cumprod = 1.0
- new_betas = []
- for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
- if i in self.use_timesteps:
- new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
- last_alpha_cumprod = alpha_cumprod
- self.timestep_map.append(i)
- kwargs["betas"] = np.array(new_betas)
- super().__init__(**kwargs)
-
- def p_mean_variance(
- self, model, *args, **kwargs
- ): # pylint: disable=signature-differs
- return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
-
- def training_losses(
- self, model, *args, **kwargs
- ): # pylint: disable=signature-differs
- return super().training_losses(self._wrap_model(model), *args, **kwargs)
-
- def condition_mean(self, cond_fn, *args, **kwargs):
- return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
-
- def condition_score(self, cond_fn, *args, **kwargs):
- return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
-
- def _wrap_model(self, model):
- if isinstance(model, _WrappedModel):
- return model
- return _WrappedModel(
- model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
- )
-
-
- def _scale_timesteps(self, t):
- # Scaling is done by the wrapped model.
- return t
-
-
-class _WrappedModel:
- def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
- self.model = model
- self.timestep_map = timestep_map
- self.rescale_timesteps = rescale_timesteps
- self.original_num_steps = original_num_steps
-
-
- def __call__(self, x, ts, **kwargs):
- map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
- new_ts = map_tensor[ts]
- if self.rescale_timesteps:
- new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
- return self.model(x, new_ts, **kwargs)
-
-
-
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/train_util-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/train_util-checkpoint.py
deleted file mode 100644
index df2a0fd3a1ea8fb315e4d8a0780c554ffd665795..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/train_util-checkpoint.py
+++ /dev/null
@@ -1,376 +0,0 @@
-import copy
-import functools
-import os
-
-import blobfile as bf
-import torch as th
-import torch.distributed as dist
-import torch.utils.tensorboard
-from torch.optim import AdamW
-import torch.cuda.amp as amp
-
-import itertools
-
-from . import dist_util, logger
-from .resample import LossAwareSampler, UniformSampler
-from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
-
-INITIAL_LOG_LOSS_SCALE = 20.0
-
-def visualize(img):
- _min = img.min()
- _max = img.max()
- normalized_img = (img - _min)/ (_max - _min)
- return normalized_img
-
-class TrainLoop:
- def __init__(
- self,
- *,
- model,
- diffusion,
- data,
- batch_size,
- in_channels,
- image_size,
- microbatch,
- lr,
- ema_rate,
- log_interval,
- save_interval,
- resume_checkpoint,
- resume_step,
- use_fp16=False,
- fp16_scale_growth=1e-3,
- schedule_sampler=None,
- weight_decay=0.0,
- lr_anneal_steps=0,
- dataset='brats',
- summary_writer=None,
- mode='default',
- loss_level='image',
- ):
- self.summary_writer = summary_writer
- self.mode = mode
- self.model = model
- self.diffusion = diffusion
- self.datal = data
- self.dataset = dataset
- self.iterdatal = iter(data)
- self.batch_size = batch_size
- self.in_channels = in_channels
- self.image_size = image_size
- self.microbatch = microbatch if microbatch > 0 else batch_size
- self.lr = lr
- self.ema_rate = (
- [ema_rate]
- if isinstance(ema_rate, float)
- else [float(x) for x in ema_rate.split(",")]
- )
- self.log_interval = log_interval
- self.save_interval = save_interval
- self.resume_checkpoint = resume_checkpoint
- self.use_fp16 = use_fp16
- if self.use_fp16:
- self.grad_scaler = amp.GradScaler()
- else:
- self.grad_scaler = amp.GradScaler(enabled=False)
-
- self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
- self.weight_decay = weight_decay
- self.lr_anneal_steps = lr_anneal_steps
-
- self.dwt = DWT_3D('haar')
- self.idwt = IDWT_3D('haar')
-
- self.loss_level = loss_level
-
- self.step = 1
- self.resume_step = resume_step
- self.global_batch = self.batch_size * dist.get_world_size()
-
- self.sync_cuda = th.cuda.is_available()
-
- self._load_and_sync_parameters()
-
- self.opt = AdamW(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
- if self.resume_step:
- print("Resume Step: " + str(self.resume_step))
- self._load_optimizer_state()
-
- if not th.cuda.is_available():
- logger.warn(
- "Training requires CUDA. "
- )
-
- def _load_and_sync_parameters(self):
- resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
-
- if resume_checkpoint:
- print('resume model ...')
- self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
- if dist.get_rank() == 0:
- logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
- self.model.load_state_dict(
- dist_util.load_state_dict(
- resume_checkpoint, map_location=dist_util.dev()
- )
- )
-
- dist_util.sync_params(self.model.parameters())
-
-
- def _load_optimizer_state(self):
- main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
- opt_checkpoint = bf.join(
- bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
- )
- if bf.exists(opt_checkpoint):
- logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
- state_dict = dist_util.load_state_dict(
- opt_checkpoint, map_location=dist_util.dev()
- )
- self.opt.load_state_dict(state_dict)
- else:
- print('no optimizer checkpoint exists')
-
- def run_loop(self):
- import time
- t = time.time()
- while not self.lr_anneal_steps or self.step + self.resume_step < self.lr_anneal_steps:
- t_total = time.time() - t
- t = time.time()
- if self.dataset in ['brats', 'lidc-idri']:
- try:
- batch = next(self.iterdatal)
- cond = {}
- except StopIteration:
- self.iterdatal = iter(self.datal)
- batch = next(self.iterdatal)
- cond = {}
-
- batch = batch.to(dist_util.dev())
-
- t_fwd = time.time()
- t_load = t_fwd-t
-
- lossmse, sample, sample_idwt = self.run_step(batch, cond)
-
- t_fwd = time.time()-t_fwd
-
- names = ["LLL", "LLH", "LHL", "LHH", "HLL", "HLH", "HHL", "HHH"]
-
- if self.summary_writer is not None:
- self.summary_writer.add_scalar('time/load', t_load, global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('time/forward', t_fwd, global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('time/total', t_total, global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/MSE', lossmse.item(), global_step=self.step + self.resume_step)
-
- if self.step % 200 == 0:
- image_size = sample_idwt.size()[2]
- midplane = sample_idwt[0, 0, :, :, image_size // 2]
- self.summary_writer.add_image('sample/x_0', midplane.unsqueeze(0),
- global_step=self.step + self.resume_step)
-
- image_size = sample.size()[2]
- for ch in range(8):
- midplane = sample[0, ch, :, :, image_size // 2]
- self.summary_writer.add_image('sample/{}'.format(names[ch]), midplane.unsqueeze(0),
- global_step=self.step + self.resume_step)
-
- if self.step % self.log_interval == 0:
- logger.dumpkvs()
-
- if self.step % self.save_interval == 0:
- self.save()
- # Run for a finite amount of time in integration tests.
- if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
- return
- self.step += 1
-
- # Save the last checkpoint if it wasn't already saved.
- if (self.step - 1) % self.save_interval != 0:
- self.save()
-
- def run_step(self, batch, cond, label=None, info=dict()):
- lossmse, sample, sample_idwt = self.forward_backward(batch, cond, label)
-
- if self.use_fp16:
- self.grad_scaler.unscale_(self.opt) # check self.grad_scaler._per_optimizer_states
-
- # compute norms
- with torch.no_grad():
- param_max_norm = max([p.abs().max().item() for p in self.model.parameters()])
- grad_max_norm = max([p.grad.abs().max().item() for p in self.model.parameters()])
- info['norm/param_max'] = param_max_norm
- info['norm/grad_max'] = grad_max_norm
-
- if not torch.isfinite(lossmse): #infinite
- if not torch.isfinite(torch.tensor(param_max_norm)):
- logger.log(f"Model parameters contain non-finite value {param_max_norm}, entering breakpoint", level=logger.ERROR)
- breakpoint()
- else:
- logger.log(f"Model parameters are finite, but loss is not: {lossmse}"
- "\n -> update will be skipped in grad_scaler.step()", level=logger.WARN)
-
- if self.use_fp16:
- print("Use fp16 ...")
- self.grad_scaler.step(self.opt)
- self.grad_scaler.update()
- info['scale'] = self.grad_scaler.get_scale()
- else:
- self.opt.step()
- self._anneal_lr()
- self.log_step()
- return lossmse, sample, sample_idwt
-
- def forward_backward(self, batch, cond, label=None):
- for p in self.model.parameters(): # Zero out gradient
- p.grad = None
-
- for i in range(0, batch.shape[0], self.microbatch):
- micro = batch[i: i + self.microbatch].to(dist_util.dev())
-
- if label is not None:
- micro_label = label[i: i + self.microbatch].to(dist_util.dev())
- else:
- micro_label = None
-
- micro_cond = None
-
- last_batch = (i + self.microbatch) >= batch.shape[0]
- t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
-
- compute_losses = functools.partial(self.diffusion.training_losses,
- self.model,
- x_start=micro,
- t=t,
- model_kwargs=micro_cond,
- labels=micro_label,
- mode=self.mode,
- )
- losses1 = compute_losses()
-
- if isinstance(self.schedule_sampler, LossAwareSampler):
- self.schedule_sampler.update_with_local_losses(
- t, losses1["loss"].detach()
- )
-
- losses = losses1[0] # Loss value
- sample = losses1[1] # Denoised subbands at t=0
- sample_idwt = losses1[2] # Inverse wavelet transformed denoised subbands at t=0
-
- # Log wavelet level loss
- self.summary_writer.add_scalar('loss/mse_wav_lll', losses["mse_wav"][0].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_llh', losses["mse_wav"][1].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_lhl', losses["mse_wav"][2].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_lhh', losses["mse_wav"][3].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_hll', losses["mse_wav"][4].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_hlh', losses["mse_wav"][5].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_hhl', losses["mse_wav"][6].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_hhh', losses["mse_wav"][7].item(),
- global_step=self.step + self.resume_step)
-
- weights = th.ones(len(losses["mse_wav"])).cuda() # Equally weight all wavelet channel losses
-
- loss = (losses["mse_wav"] * weights).mean()
- lossmse = loss.detach()
-
- log_loss_dict(self.diffusion, t, {k: v * weights for k, v in losses.items()})
-
- # perform some finiteness checks
- if not torch.isfinite(loss):
- logger.log(f"Encountered non-finite loss {loss}")
- if self.use_fp16:
- self.grad_scaler.scale(loss).backward()
- else:
- loss.backward()
-
- return lossmse.detach(), sample, sample_idwt
-
- def _anneal_lr(self):
- if not self.lr_anneal_steps:
- return
- frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
- lr = self.lr * (1 - frac_done)
- for param_group in self.opt.param_groups:
- param_group["lr"] = lr
-
- def log_step(self):
- logger.logkv("step", self.step + self.resume_step)
- logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
-
- def save(self):
- def save_checkpoint(rate, state_dict):
- if dist.get_rank() == 0:
- logger.log("Saving model...")
- if self.dataset == 'brats':
- filename = f"brats_{(self.step+self.resume_step):06d}.pt"
- elif self.dataset == 'lidc-idri':
- filename = f"lidc-idri_{(self.step+self.resume_step):06d}.pt"
- else:
- raise ValueError(f'dataset {self.dataset} not implemented')
-
- with bf.BlobFile(bf.join(get_blob_logdir(), 'checkpoints', filename), "wb") as f:
- th.save(state_dict, f)
-
- save_checkpoint(0, self.model.state_dict())
-
- if dist.get_rank() == 0:
- checkpoint_dir = os.path.join(logger.get_dir(), 'checkpoints')
- with bf.BlobFile(
- bf.join(checkpoint_dir, f"opt{(self.step+self.resume_step):06d}.pt"),
- "wb",
- ) as f:
- th.save(self.opt.state_dict(), f)
-
-
-def parse_resume_step_from_filename(filename):
- """
- Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
- checkpoint's number of steps.
- """
-
- split = os.path.basename(filename)
- split = split.split(".")[-2] # remove extension
- split = split.split("_")[-1] # remove possible underscores, keep only last word
- # extract trailing number
- reversed_split = []
- for c in reversed(split):
- if not c.isdigit():
- break
- reversed_split.append(c)
- split = ''.join(reversed(reversed_split))
- split = ''.join(c for c in split if c.isdigit()) # remove non-digits
- try:
- return int(split)
- except ValueError:
- return 0
-
-
-def get_blob_logdir():
- # You can change this to be a separate path to save checkpoints to
- # a blobstore or some external drive.
- return logger.get_dir()
-
-
-def find_resume_checkpoint():
- # On your infrastructure, you may want to override this to automatically
- # discover the latest checkpoint on your blob storage, etc.
- return None
-
-
-def log_loss_dict(diffusion, ts, losses):
- for key, values in losses.items():
- logger.logkv_mean(key, values.mean().item())
- # Log the quantiles (four quartiles, in particular).
- for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
- quartile = int(4 * sub_t / diffusion.num_timesteps)
- logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
diff --git a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/wunet-checkpoint.py b/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/wunet-checkpoint.py
deleted file mode 100644
index 19cec981d803558df54f54aa6c891466a2b62bd0..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/.ipynb_checkpoints/wunet-checkpoint.py
+++ /dev/null
@@ -1,795 +0,0 @@
-from abc import abstractmethod
-
-import math
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .nn import checkpoint, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding
-from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- else:
- x = layer(x)
- return x
-
-
-class Upsample(nn.Module):
- """
- A wavelet upsampling layer with an optional convolution on the skip connections used to perform upsampling.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True, use_freq=True):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- self.resample_2d = resample_2d
-
- self.use_freq = use_freq
- self.idwt = IDWT_3D("haar")
-
- # Grouped convolution on 7 high frequency subbands (skip connections)
- if use_conv:
- self.conv = conv_nd(dims, self.channels * 7, self.out_channels * 7, 3, padding=1, groups=7)
-
- def forward(self, x):
- if isinstance(x, tuple):
- skip = x[1]
- x = x[0]
- assert x.shape[1] == self.channels
-
- if self.use_conv:
- skip = self.conv(th.cat(skip, dim=1) / 3.) * 3.
- skip = tuple(th.chunk(skip, 7, dim=1))
-
- if self.use_freq:
- x = self.idwt(3. * x, skip[0], skip[1], skip[2], skip[3], skip[4], skip[5], skip[6])
- else:
- if self.dims == 3 and self.resample_2d:
- x = F.interpolate(
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
- )
- else:
- x = F.interpolate(x, scale_factor=2, mode="nearest")
-
- return x, None
-
-
-class Downsample(nn.Module):
- """
- A wavelet downsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True, use_freq=True):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
-
- self.use_freq = use_freq
- self.dwt = DWT_3D("haar")
-
- stride = (1, 2, 2) if dims == 3 and resample_2d else 2
-
- if use_conv:
- self.op = conv_nd(dims, self.channels, self.out_channels, 3, stride=stride, padding=1)
- elif self.use_freq:
- self.op = self.dwt
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- if self.use_freq:
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = self.op(x)
- x = (LLL / 3., (LLH, LHL, LHH, HLL, HLH, HHL, HHH))
- else:
- x = self.op(x)
- return x
-
-
-class WaveletDownsample(nn.Module):
- """
- Implements the wavelet downsampling blocks used to generate the input residuals.
-
- :param in_ch: number of input channels.
- :param out_ch: number of output channels (should match the feature size of the corresponding U-Net level)
- """
- def __init__(self, in_ch=None, out_ch=None):
- super().__init__()
- out_ch = out_ch if out_ch else in_ch
- self.in_ch = in_ch
- self.out_ch = out_ch
- self.conv = conv_nd(3, self.in_ch * 8, self.out_ch, 3, stride=1, padding=1)
- self.dwt = DWT_3D('haar')
-
- def forward(self, x):
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = self.dwt(x)
- x = th.cat((LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH), dim=1) / 3.
- return self.conv(x)
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels via up- or downsampling.
-
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels, otherwise out_channels = channels.
- :param use_conv: if True and out_channels is specified, use a spatial convolution instead of a smaller 1x1
- convolution to change the channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- :param num_groups: if specified, the number of groups in the (adaptive) group normalization layers.
- :param use_freq: specifies if frequency aware up- or downsampling should be used.
- :param z_emb_dim: the dimension of the z-embedding.
-
- """
-
- def __init__(self, channels, emb_channels, dropout, out_channels=None, use_conv=True, use_scale_shift_norm=False,
- dims=2, use_checkpoint=False, up=False, down=False, num_groups=32, resample_2d=True, use_freq=False):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_scale_shift_norm = use_scale_shift_norm
- self.use_checkpoint = use_checkpoint
- self.up = up
- self.down = down
- self.num_groups = num_groups
- self.use_freq = use_freq
-
-
- # Define (adaptive) group normalization layers
- self.in_layers = nn.Sequential(
- normalization(channels, self.num_groups),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- # Check if up- or downsampling should be performed by this ResBlock
- self.updown = up or down
- if up:
- self.h_upd = Upsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
- self.x_upd = Upsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
- elif down:
- self.h_upd = Downsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
- self.x_upd = Downsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- # Define the timestep embedding layers
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels),
- )
-
- # Define output layers including (adaptive) group normalization
- self.out_layers = nn.Sequential(
- normalization(self.out_channels, self.num_groups),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)),
- )
-
- # Define skip branch
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
-
- def forward(self, x, temb):
- # Make sure to pipe skip connections
- if isinstance(x, tuple):
- hSkip = x[1]
- else:
- hSkip = None
-
- # Forward pass for ResBlock with up- or downsampling
- if self.updown:
- if self.up:
- x = x[0]
- h = self.in_layers(x)
-
- if self.up:
- h = (h, hSkip)
- x = (x, hSkip)
-
- h, hSkip = self.h_upd(h) # Updown in main branch (ResBlock)
- x, xSkip = self.x_upd(x) # Updown in skip-connection (ResBlock)
-
- # Forward pass for standard ResBlock
- else:
- if isinstance(x, tuple): # Check for skip connection tuple
- x = x[0]
- h = self.in_layers(x)
-
- # Common layers for both standard and updown ResBlocks
- emb_out = self.emb_layers(temb)
-
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
-
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
-
- else:
- h = h + emb_out # Add timestep embedding
- h = self.out_layers(h) # Forward pass out layers
-
- # Add skip connections
- out = self.skip_connection(x) + h
- out = out, hSkip
-
- return out
-
-
-
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.

    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.

    :param channels: number of input (and output) feature channels.
    :param num_heads: number of attention heads; ignored when
        num_head_channels is specified.
    :param num_head_channels: if not -1, a fixed channel width per head;
        must evenly divide channels.
    :param use_checkpoint: if True, use gradient checkpointing in forward.
    :param use_new_attention_order: if True, split qkv before splitting
        heads (QKVAttention) instead of heads before qkv (QKVAttentionLegacy).
    :param num_groups: group count for the GroupNorm layer.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
        num_groups=32,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels, num_groups)
        # 1x1 convolution producing concatenated q, k, v.
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        # Zero-initialized projection so the residual block starts as identity.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # Bug fix: previously the checkpoint flag was hard-coded to True,
        # silently ignoring self.use_checkpoint and forcing recomputation
        # even when checkpointing was disabled. Honor the stored flag.
        return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)

    def _forward(self, x):
        b, c, *spatial = x.shape
        # Flatten all spatial dims into one token axis, attend, restore shape.
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)
-
-
def count_flops_attn(model, _x, y):
    """
    FLOP counter hook for the `thop` package, counting the matmul
    operations of an attention layer.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, chans, *spatial_dims = y[0].shape
    tokens = int(np.prod(spatial_dims))
    # Attention performs two matmuls of identical cost: one for the
    # attention weights (QK^T) and one for combining the value vectors.
    matmuls = 2 * batch * (tokens ** 2) * chans
    model.total_ops += th.DoubleTensor([matmuls])
-
-
class QKVAttentionLegacy(nn.Module):
    """
    QKV attention in the "legacy" layout: the head dimension is split off
    before qkv is divided into query/key/value. Matches legacy
    QKVAttention + input/output heads shaping.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        per_head = qkv.reshape(batch * self.n_heads, 3 * head_dim, length)
        q, k, v = per_head.split(head_dim, dim=1)
        # Scale q and k by ch^(-1/4) each (ch^(-1/2) combined) before the
        # matmul: more stable with f16 than dividing afterwards.
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        attn = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", attn, v)
        return out.reshape(batch, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
-
-
class QKVAttention(nn.Module):
    """
    QKV attention splitting in the "new" order: qkv is chunked into
    query/key/value first, and heads are separated afterwards.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        # Symmetric ch^(-1/4) scaling of q and k: more stable with f16
        # than dividing the logits afterwards.
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(batch * self.n_heads, head_dim, length),
            (k * scale).view(batch * self.n_heads, head_dim, length),
        )
        attn = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum(
            "bts,bcs->bct", attn, v.reshape(batch * self.n_heads, head_dim, length)
        )
        return out.reshape(batch, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
-
-
class WavUNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.

    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set,
                                  list, or tuple. For example, if this contains 4, then at 4x downsampling, attention
                                  will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially increased efficiency.
    :param num_groups: group count used by all GroupNorm layers.
    :param bottleneck_attention: if True, include the attention layer in the middle block.
    :param resample_2d: passed through to ResBlock/Upsample/Downsample to control the resampling pattern.
    :param additive_skips: if True, merge skip features additively (scaled by 1/sqrt(2)) instead of concatenating.
    :param decoder_device_thresh: index of the first output block placed on the second device when the
                                  model is distributed across two devices via to([dev0, dev1]).
    :param use_freq: if True, skip connections carry wavelet (frequency) subbands as tuples.
    :param progressive_input: 'residual' enables the WaveletDownsample input-pyramid branch.
    """

    def __init__(self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions,
                 dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None,
                 use_checkpoint=False, use_fp16=False, num_heads=1, num_head_channels=-1, num_heads_upsample=-1,
                 use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, num_groups=32,
                 bottleneck_attention=True, resample_2d=True, additive_skips=False, decoder_device_thresh=0,
                 use_freq=False, progressive_input='residual'):
        super().__init__()

        # Deprecated knob: fall back to num_heads when not explicitly set.
        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        # self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        # self.num_heads = num_heads
        # self.num_head_channels = num_head_channels
        # self.num_heads_upsample = num_heads_upsample
        self.num_groups = num_groups
        self.bottleneck_attention = bottleneck_attention
        # Filled in by to(); list of [encoder_device, decoder_device].
        self.devices = None
        self.decoder_device_thresh = decoder_device_thresh
        self.additive_skips = additive_skips
        self.use_freq = use_freq
        self.progressive_input = progressive_input

        #############################
        # TIMESTEP EMBEDDING layers #
        #############################
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim))

        ###############
        # INPUT block #
        ###############
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )

        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        input_pyramid_channels =in_channels
        ds = 1  # current cumulative downsampling factor

        ######################################
        # DOWNWARD path - Feature extraction #
        ######################################
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):  # Adding Residual blocks
                layers = [
                    ResBlock(
                        channels=ch,
                        emb_channels=time_embed_dim,
                        dropout=dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        num_groups=self.num_groups,
                        resample_2d=resample_2d,
                        use_freq=self.use_freq,
                    )
                ]
                ch = mult * model_channels  # New input channels = channel_mult * base_channels
                # (first ResBlock performs channel adaption)

                if ds in attention_resolutions:  # Adding Attention layers
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                            num_groups=self.num_groups,
                        )
                    )

                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)

            # Adding downsampling operation (every level, including the last one)
            out_ch = ch
            layers = []
            layers.append(
                ResBlock(
                    ch,
                    time_embed_dim,
                    dropout,
                    out_channels=out_ch,
                    dims=dims,
                    use_checkpoint=use_checkpoint,
                    use_scale_shift_norm=use_scale_shift_norm,
                    down=True,
                    num_groups=self.num_groups,
                    resample_2d=resample_2d,
                    use_freq=self.use_freq,
                )
                if resblock_updown
                else Downsample(
                    ch,
                    conv_resample,
                    dims=dims,
                    out_channels=out_ch,
                    resample_2d=resample_2d,
                )
            )
            self.input_blocks.append(TimestepEmbedSequential(*layers))

            # Optional wavelet input pyramid: downsample the raw input in
            # parallel and merge it into the main branch in forward().
            layers = []
            if self.progressive_input == 'residual':
                layers.append(WaveletDownsample(in_ch=input_pyramid_channels, out_ch=out_ch))
                input_pyramid_channels = out_ch

            # NOTE(review): when progressive_input != 'residual' this appends an
            # *empty* TimestepEmbedSequential; forward() indexes module[0], which
            # would fail on an empty block — confirm intended for that config.
            self.input_blocks.append(TimestepEmbedSequential(*layers))

            ch = out_ch
            input_block_chans.append(ch)
            ds *= 2
            self._feature_size += ch

        # Keep a copy of the encoder channel bookkeeping (shallow copy).
        self.input_block_chans_bk = input_block_chans[:]

        #########################
        # LATENT/ MIDDLE blocks #
        #########################
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                num_groups=self.num_groups,
                resample_2d=resample_2d,
                use_freq=self.use_freq,
            ),
            # Attention in the bottleneck is optional.
            *([AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
                num_groups=self.num_groups,
            )] if self.bottleneck_attention else [])
            ,
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                num_groups=self.num_groups,
                resample_2d=resample_2d,
                use_freq=self.use_freq,
            ),
        )
        self._feature_size += ch

        #################################
        # UPWARD path - feature mapping #
        #################################
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks+1):  # Adding Residual blocks
                if not i == num_res_blocks:
                    mid_ch = model_channels * mult

                    layers = [
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=mid_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            num_groups=self.num_groups,
                            resample_2d=resample_2d,
                            use_freq=self.use_freq,
                        )
                    ]
                    if ds in attention_resolutions:  # Adding Attention layers
                        layers.append(
                            AttentionBlock(
                                mid_ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads_upsample,
                                num_head_channels=num_head_channels,
                                use_new_attention_order=use_new_attention_order,
                                num_groups=self.num_groups,
                            )
                        )
                    ch = mid_ch
                else:  # Adding upsampling operation
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            mid_ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                            num_groups=self.num_groups,
                            resample_2d=resample_2d,
                            use_freq=self.use_freq,
                        )
                        if resblock_updown
                        else Upsample(
                            mid_ch,
                            conv_resample,
                            dims=dims,
                            out_channels=out_ch,
                            resample_2d=resample_2d
                        )
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
            mid_ch = ch

        ################
        # Out ResBlock #
        ################
        self.out_res = nn.ModuleList([])
        for i in range(num_res_blocks):
            layers = [
                ResBlock(
                    ch,
                    time_embed_dim,
                    dropout,
                    out_channels=ch,
                    dims=dims,
                    use_checkpoint=use_checkpoint,
                    use_scale_shift_norm=use_scale_shift_norm,
                    num_groups=self.num_groups,
                    resample_2d=resample_2d,
                    use_freq=self.use_freq,
                )
            ]
            self.out_res.append(TimestepEmbedSequential(*layers))

        ################
        # OUTPUT block #
        ################
        # NOTE(review): the conv consumes `model_channels` while the GroupNorm
        # is built for `ch`; these only match when channel_mult[0] == 1 —
        # confirm for other configurations.
        self.out = nn.Sequential(
            normalization(ch, self.num_groups),
            nn.SiLU(),
            conv_nd(dims, model_channels, out_channels, 3, padding=1),
        )

    def to(self, *args, **kwargs):
        """
        we overwrite the to() method for the case where we
        distribute parts of our model to different devices
        """
        if isinstance(args[0], (list, tuple)) and len(args[0]) > 1:
            assert not kwargs and len(args) == 1
            # distribute to multiple devices
            self.devices = args[0]
            # Encoder, time embedding, and middle block on the first device;
            # decoder blocks are split at decoder_device_thresh.
            self.input_blocks.to(self.devices[0])
            self.time_embed.to(self.devices[0])
            self.middle_block.to(self.devices[0])  # maybe devices 0
            for k, b in enumerate(self.output_blocks):
                if k < self.decoder_device_thresh:
                    b.to(self.devices[0])
                else:  # after threshold
                    b.to(self.devices[1])
            self.out.to(self.devices[0])
            print(f"distributed UNet components to devices {self.devices}")

        else:  # default behaviour
            super().to(*args, **kwargs)
            if self.devices is None:  # if self.devices has not been set yet, read it from params
                p = next(self.parameters())
                self.devices = [p.device, p.device]

    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x C x ...] Tensor of outputs.
        """
        hs = []  # Save skip-connections here
        input_pyramid = x
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))  # Gen sinusoidal timestep embedding
        h = x
        self.hs_shapes = []

        for module in self.input_blocks:
            if not isinstance(module[0], WaveletDownsample):
                h = module(h, emb)  # Run a downstream module
                skip = None
                if isinstance(h, tuple):  # Check for skip features (tuple of high frequency subbands) and store in hs
                    h, skip = h
                hs.append(skip)
                self.hs_shapes.append(h.shape)
            else:
                # Wavelet input pyramid: downsample the raw input and merge
                # it into the main branch as a residual.
                input_pyramid = module(input_pyramid, emb)
                input_pyramid = input_pyramid + h
                h = input_pyramid

        for module in self.middle_block:
            h = module(h, emb)
            if isinstance(h, tuple):
                h, skip = h

        for module in self.output_blocks:
            new_hs = hs.pop()
            # NOTE(review): truthiness of `new_hs` — fine for None/tuple
            # entries, but would raise for a multi-element tensor; confirm
            # stored skips are always None or tuples here.
            if new_hs:
                skip = new_hs

            # Use additive skip connections
            if self.additive_skips:
                h = (h + new_hs) / np.sqrt(2)

            # Use frequency aware skip connections
            elif self.use_freq:  # You usually want to use the frequency aware upsampling
                if isinstance(h, tuple):  # Replace None with the stored skip features
                    l = list(h)
                    l[1] = skip
                    h = tuple(l)
                else:
                    h = (h, skip)

            # Use concatenation
            else:
                h = th.cat([h, new_hs], dim=1)

            h = module(h, emb)  # Run an upstream module

        for module in self.out_res:
            h = module(h, emb)

        # Final blocks return (features, skip) tuples in the frequency-aware
        # path; keep only the features. Assumes h is a 2-tuple here — TODO
        # confirm for non-freq configurations.
        h, _ = h
        return self.out(h)
diff --git a/wdm-3d-initial/guided_diffusion/__init__.py b/wdm-3d-initial/guided_diffusion/__init__.py
deleted file mode 100644
index fd11937a64d66c81dffd618117efc3a9ddc5fd9c..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-Codebase for "Diffusion Models for Medical Anomaly Detection".
-"""
diff --git a/wdm-3d-initial/guided_diffusion/bratsloader.py b/wdm-3d-initial/guided_diffusion/bratsloader.py
deleted file mode 100644
index 951e0dda170f9f1bf2370c8bab49596724f13e9d..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/bratsloader.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.utils.data
-import numpy as np
-import os
-import os.path
-import nibabel
-
-
class BRATSVolumes(torch.utils.data.Dataset):
    def __init__(self, directory, test_flag=False, normalize=None, mode='train', img_size=256):
        '''
        directory is expected to contain some folder structure:
        if some subfolder contains only files, all of these
        files are assumed to have a name like
        brats_train_NNN_XXX_123_w.nii.gz
        where XXX is one of t1n, t1c, t2w, t2f, seg
        we assume these five files belong to the same image
        seg is supposed to contain the segmentation
        '''
        super().__init__()
        self.mode = mode
        self.directory = os.path.expanduser(directory)
        # Fall back to the identity transform when no normalizer is given.
        self.normalize = normalize or (lambda x: x)
        self.test_flag = test_flag
        self.img_size = img_size
        # Segmentation maps are only expected outside of test mode.
        if test_flag:
            self.seqtypes = ['t1n', 't1c', 't2w', 't2f']
        else:
            self.seqtypes = ['t1n', 't1c', 't2w', 't2f', 'seg']
        self.seqtypes_set = set(self.seqtypes)
        self.database = []

        if self.mode == 'fake':  # Used for evaluating fake data
            # Every file found becomes its own datapoint.
            for root, dirs, files in os.walk(self.directory):
                for fname in files:
                    self.database.append({'t1n': os.path.join(root, fname)})
        else:  # Used during training and for evaluating real data
            for root, dirs, files in os.walk(self.directory):
                # A directory without subdirectories is a data directory.
                if dirs:
                    continue
                datapoint = dict()
                # Extract all modality files as channels; assumes names of
                # the form xxx-xxx-xxx-xxx-<seqtype>.<ext>.
                for fname in sorted(files):
                    seqtype = fname.split('-')[4].split('.')[0]
                    datapoint[seqtype] = os.path.join(root, fname)
                self.database.append(datapoint)

    def __getitem__(self, x):
        filedict = self.database[x]
        name = filedict['t1n']
        # Only the t1-weighted volume is used.
        volume = nibabel.load(name).get_fdata()

        if self.mode == 'fake':
            image = torch.tensor(volume, dtype=torch.float32)
            image = image.unsqueeze(dim=0)
        else:
            # Clip intensity outliers and rescale to [0, 1].
            lo = np.quantile(volume, 0.001)
            hi = np.quantile(volume, 0.999)
            clipped = np.clip(volume, lo, hi)
            scaled = (clipped - np.min(clipped)) / (np.max(clipped) - np.min(clipped))
            out = torch.tensor(scaled)

            # Zero-pad the volume into a 256^3 cube.
            image = torch.zeros(1, 256, 256, 256)
            image[:, 8:-8, 8:-8, 50:-51] = out

            # Optional 2x average-pool downsampling to 128^3.
            if self.img_size == 128:
                image = nn.AvgPool3d(kernel_size=2, stride=2)(image)

        # Apply the user-supplied (or identity) normalization.
        image = self.normalize(image)

        if self.mode == 'fake':
            return image, name
        return image

    def __len__(self):
        return len(self.database)
diff --git a/wdm-3d-initial/guided_diffusion/dist_util.py b/wdm-3d-initial/guided_diffusion/dist_util.py
deleted file mode 100644
index c2385710ce2a404050021411fd6418cdf9548b02..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/dist_util.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Helpers for distributed training.
-"""
-
-import io
-import os
-import socket
-
-import blobfile as bf
-import torch as th
-import torch.distributed as dist
-
-# Change this to reflect your cluster layout.
-# The GPU for a given rank is (rank % GPUS_PER_NODE).
-GPUS_PER_NODE = 8
-
-SETUP_RETRY_COUNT = 3
-
-
def setup_dist(devices=(0,)):
    """
    Setup a distributed process group.

    Configures a single-rank (world size 1) process group on localhost,
    restricting CUDA visibility to the given device ids.
    """
    if dist.is_initialized():
        return

    # `devices` may be an iterable of ids or a single scalar.
    try:
        device_string = ','.join(map(str, devices))
    except TypeError:
        device_string = str(devices)
    os.environ["CUDA_VISIBLE_DEVICES"] = device_string

    # gloo on CPU-only machines, nccl when CUDA is available.
    backend = "gloo" if not th.cuda.is_available() else "nccl"

    if backend == "gloo":
        hostname = "localhost"
    else:
        hostname = socket.gethostbyname(socket.getfqdn())

    # Single-process rendezvous on the loopback address.
    os.environ["MASTER_ADDR"] = '127.0.1.1'
    os.environ["RANK"] = '0'
    os.environ["WORLD_SIZE"] = '1'

    # Grab a currently free TCP port for the rendezvous.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))
    s.listen(1)
    port = s.getsockname()[1]
    s.close()
    os.environ["MASTER_PORT"] = str(port)

    dist.init_process_group(backend=backend, init_method="env://")
-
def dev(device_number=0):
    """
    Get the device to use for torch.distributed.

    Accepts a single index or a list/tuple of indices (resolved
    element-wise). Falls back to CPU when CUDA is unavailable.
    """
    if isinstance(device_number, (list, tuple)):  # multiple devices specified
        return [dev(idx) for idx in device_number]  # recursive call

    if not th.cuda.is_available():
        return th.device("cpu")

    device_count = th.cuda.device_count()
    if device_count == 1:
        return th.device(f"cuda")
    # With several GPUs the caller must be specific about which one.
    if device_number < device_count:
        return th.device(f'cuda:{device_number}')
    raise ValueError(f'requested device number {device_number} (0-indexed) but only {device_count} devices available')
-
-
def load_state_dict(path, **kwargs):
    """
    Load a PyTorch checkpoint from `path` via blobfile.

    NOTE(review): the rank gate below is vestigial MPI logic — the rank is
    hard-coded to 0, so the file is always read locally.
    """
    mpigetrank = 0
    if mpigetrank == 0:
        with bf.BlobFile(path, "rb") as f:
            data = f.read()
    else:
        data = None
    return th.load(io.BytesIO(data), **kwargs)
-
-
def sync_params(params):
    """
    Synchronize a sequence of Tensors across ranks from rank 0.

    Intentionally a no-op: the torch.distributed broadcast was disabled
    for this single-process setup, so `params` is left untouched.
    """
    return None
-
-
-def _find_free_port():
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind(("", 0))
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- return s.getsockname()[1]
- finally:
- s.close()
diff --git a/wdm-3d-initial/guided_diffusion/gaussian_diffusion.py b/wdm-3d-initial/guided_diffusion/gaussian_diffusion.py
deleted file mode 100644
index 73ade109a16d3eb91588b5f53f7e05c82407aeed..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/gaussian_diffusion.py
+++ /dev/null
@@ -1,1185 +0,0 @@
-"""
-This code started out as a PyTorch port of Ho et al's diffusion models:
-https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
-
-Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
-"""
-from PIL import Image
-from torch.autograd import Variable
-import enum
-import torch.nn.functional as F
-from torchvision.utils import save_image
-import torch
-import math
-import numpy as np
-import torch as th
-from .train_util import visualize
-from .nn import mean_flat
-from .losses import normal_kl, discretized_gaussian_log_likelihood
-from scipy import ndimage
-from torchvision import transforms
-import matplotlib.pyplot as plt
-from scipy.interpolate import interp1d
-
-from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
-
# Module-level 3-D Haar wavelet transform pair, shared by the sampling and
# loss code in this file to move between voxel space and wavelet space.
dwt = DWT_3D('haar')
idwt = IDWT_3D('haar')
-
-
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.

    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps.
    Beta schedules may be added, but should not be removed or changed once
    they are committed to maintain backwards compatibility.

    :raises NotImplementedError: for an unknown schedule name.
    """
    if schedule_name == "linear":
        # Ho et al.'s linear schedule, rescaled so its endpoints match the
        # original 1000-step formulation for any number of steps.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(
            scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64
        )
    if schedule_name == "cosine":
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
-
-
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clamped at max_beta.
    return np.array(
        [
            min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
            for i in range(steps)
        ]
    )
-
-
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.

    Member order is load-bearing: `enum.auto()` assigns values by position.
    """

    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon (the added noise)
-
-
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.

    Member order is load-bearing: `enum.auto()` assigns values by position.
    """

    LEARNED = enum.auto()  # model outputs log-variance directly
    FIXED_SMALL = enum.auto()  # use the clipped posterior variance
    FIXED_LARGE = enum.auto()  # use the betas as variance
    LEARNED_RANGE = enum.auto()  # model interpolates between small and large
-
-
class LossType(enum.Enum):
    """Which training objective the diffusion process optimizes."""

    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = enum.auto()  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB

    def is_vb(self):
        """Return True for the variational-bound loss types."""
        return self in (LossType.KL, LossType.RESCALED_KL)
-
-
-class GaussianDiffusion:
- """
- Utilities for training and sampling diffusion models.
-
- Ported directly from here, and then adapted over time to further experimentation.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
-
- :param betas: a 1-D numpy array of betas for each diffusion timestep,
- starting at T and going to 1.
- :param model_mean_type: a ModelMeanType determining what the model outputs.
- :param model_var_type: a ModelVarType determining how variance is output.
- :param loss_type: a LossType determining the loss function to use.
- :param rescale_timesteps: if True, pass floating point timesteps into the
- model so that they are always scaled like in the
- original paper (0 to 1000).
- """
-
- def __init__(
- self,
- *,
- betas,
- model_mean_type,
- model_var_type,
- loss_type,
- rescale_timesteps=False,
- mode='default',
- loss_level='image'
- ):
- self.model_mean_type = model_mean_type
- self.model_var_type = model_var_type
- self.loss_type = loss_type
- self.rescale_timesteps = rescale_timesteps
- self.mode = mode
- self.loss_level=loss_level
-
- # Use float64 for accuracy.
- betas = np.array(betas, dtype=np.float64)
- self.betas = betas
- assert len(betas.shape) == 1, "betas must be 1-D"
- assert (betas > 0).all() and (betas <= 1).all()
-
- self.num_timesteps = int(betas.shape[0])
-
- alphas = 1.0 - betas
- self.alphas_cumprod = np.cumprod(alphas, axis=0) # t
- self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1]) # t-1
- self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0) # t+1
- assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
- self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
- self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
- self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
- self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
-
- # calculations for posterior q(x_{t-1} | x_t, x_0)
- self.posterior_variance = (
- betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
- )
- # log calculation clipped because the posterior variance is 0 at the
- # beginning of the diffusion chain.
- self.posterior_log_variance_clipped = np.log(
- np.append(self.posterior_variance[1], self.posterior_variance[1:])
- )
- self.posterior_mean_coef1 = (
- betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
- )
- self.posterior_mean_coef2 = (
- (1.0 - self.alphas_cumprod_prev)
- * np.sqrt(alphas)
- / (1.0 - self.alphas_cumprod)
- )
-
- def q_mean_variance(self, x_start, t):
- """
- Get the distribution q(x_t | x_0).
-
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
- """
- mean = (
- _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
- )
- variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
- log_variance = _extract_into_tensor(
- self.log_one_minus_alphas_cumprod, t, x_start.shape
- )
- return mean, variance, log_variance
-
- def q_sample(self, x_start, t, noise=None):
- """
- Diffuse the data for a given number of diffusion steps.
-
- In other words, sample from q(x_t | x_0).
-
- :param x_start: the initial data batch.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :param noise: if specified, the split-out normal noise.
- :return: A noisy version of x_start.
- """
- if noise is None:
- noise = th.randn_like(x_start)
- assert noise.shape == x_start.shape
- return (
- _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
- + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
- * noise
- )
-
- def q_posterior_mean_variance(self, x_start, x_t, t):
- """
- Compute the mean and variance of the diffusion posterior:
-
- q(x_{t-1} | x_t, x_0)
-
- """
-
- assert x_start.shape == x_t.shape
- posterior_mean = (
- _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
- + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
- )
- posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
- posterior_log_variance_clipped = _extract_into_tensor(
- self.posterior_log_variance_clipped, t, x_t.shape
- )
- assert (
- posterior_mean.shape[0]
- == posterior_variance.shape[0]
- == posterior_log_variance_clipped.shape[0]
- == x_start.shape[0]
- )
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
- def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
- """
- Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
- the initial x, x_0.
- :param model: the model, which takes a signal and a batch of timesteps
- as input.
- :param x: the [N x C x ...] tensor at time t.
- :param t: a 1-D Tensor of timesteps.
- :param clip_denoised: if True, clip the denoised signal into [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample. Applies before
- clip_denoised.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :return: a dict with the following keys:
- - 'mean': the model mean output.
- - 'variance': the model variance output.
- - 'log_variance': the log of 'variance'.
- - 'pred_xstart': the prediction for x_0.
- """
- if model_kwargs is None:
- model_kwargs = {}
-
- B, C = x.shape[:2]
-
- assert t.shape == (B,)
- model_output = model(x, self._scale_timesteps(t), **model_kwargs)
-
- if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
- assert model_output.shape == (B, C * 2, *x.shape[2:])
- model_output, model_var_values = th.split(model_output, C, dim=1)
- if self.model_var_type == ModelVarType.LEARNED:
- model_log_variance = model_var_values
- model_variance = th.exp(model_log_variance)
- else:
- min_log = _extract_into_tensor(
- self.posterior_log_variance_clipped, t, x.shape
- )
- max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
- # The model_var_values is [-1, 1] for [min_var, max_var].
- frac = (model_var_values + 1) / 2
- model_log_variance = frac * max_log + (1 - frac) * min_log
- model_variance = th.exp(model_log_variance)
- else:
- model_variance, model_log_variance = {
- # for fixedlarge, we set the initial (log-)variance like so
- # to get a better decoder log likelihood.
- ModelVarType.FIXED_LARGE: (
- np.append(self.posterior_variance[1], self.betas[1:]),
- np.log(np.append(self.posterior_variance[1], self.betas[1:])),
- ),
- ModelVarType.FIXED_SMALL: (
- self.posterior_variance,
- self.posterior_log_variance_clipped,
- ),
- }[self.model_var_type]
- model_variance = _extract_into_tensor(model_variance, t, x.shape)
- model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
-
- def process_xstart(x):
- if denoised_fn is not None:
- x = denoised_fn(x)
- if clip_denoised:
- B, _, H, W, D = x.size()
- x_idwt = idwt(x[:, 0, :, :, :].view(B, 1, H, W, D) * 3.,
- x[:, 1, :, :, :].view(B, 1, H, W, D),
- x[:, 2, :, :, :].view(B, 1, H, W, D),
- x[:, 3, :, :, :].view(B, 1, H, W, D),
- x[:, 4, :, :, :].view(B, 1, H, W, D),
- x[:, 5, :, :, :].view(B, 1, H, W, D),
- x[:, 6, :, :, :].view(B, 1, H, W, D),
- x[:, 7, :, :, :].view(B, 1, H, W, D))
-
- x_idwt_clamp = x_idwt.clamp(-1, 1)
-
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(x_idwt_clamp)
- x = th.cat([LLL / 3., LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1)
-
- return x
- return x
-
- if self.model_mean_type == ModelMeanType.PREVIOUS_X:
- pred_xstart = process_xstart(
- self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
- )
- model_mean = model_output
- elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
- if self.model_mean_type == ModelMeanType.START_X:
- pred_xstart = process_xstart(model_output)
- else:
- pred_xstart = process_xstart(
- self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
- )
- model_mean, _, _ = self.q_posterior_mean_variance(
- x_start=pred_xstart, x_t=x, t=t
- )
- else:
- raise NotImplementedError(self.model_mean_type)
-
- assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)
-
-
- return {
- "mean": model_mean,
- "variance": model_variance,
- "log_variance": model_log_variance,
- "pred_xstart": pred_xstart,
- }
-
- def _predict_xstart_from_eps(self, x_t, t, eps):
- assert x_t.shape == eps.shape
- return (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
- )
-
- def _predict_xstart_from_xprev(self, x_t, t, xprev):
- assert x_t.shape == xprev.shape
- return ( # (xprev - coef2*x_t) / coef1
- _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- - _extract_into_tensor(
- self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
- )
- * x_t
- )
-
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
- if self.mode == 'segmentation':
- x_t = x_t[:, -pred_xstart.shape[1]:, ...]
- assert pred_xstart.shape == x_t.shape
- eps = (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- - pred_xstart
- ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
- return eps
-
- def _scale_timesteps(self, t):
- if self.rescale_timesteps:
- return t.float() * (1000.0 / self.num_timesteps)
- return t
-
- def condition_mean(self, cond_fn, p_mean_var, x, t, update=None, model_kwargs=None):
- """
- Compute the mean for the previous step, given a function cond_fn that
- computes the gradient of a conditional log probability with respect to
- x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
- condition on y.
-
- This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
- """
-
-
- if update is not None:
- print('CONDITION MEAN UPDATE NOT NONE')
-
- new_mean = (
- p_mean_var["mean"].detach().float() + p_mean_var["variance"].detach() * update.float()
- )
- a=update
-
- else:
- a, gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
- new_mean = (
- p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
- )
-
- return a, new_mean
-
-
-
- def condition_score2(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
- """
- Compute what the p_mean_variance output would have been, should the
- model's score function be conditioned by cond_fn.
- See condition_mean() for details on cond_fn.
- Unlike condition_mean(), this instead uses the conditioning strategy
- from Song et al (2020).
- """
- t=t.long()
- alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
-
- eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
- a, cfn= cond_fn(
- x, self._scale_timesteps(t).long(), **model_kwargs
- )
- eps = eps - (1 - alpha_bar).sqrt() * cfn
-
- out = p_mean_var.copy()
- out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
- out["mean"], _, _ = self.q_posterior_mean_variance(
- x_start=out["pred_xstart"], x_t=x, t=t
- )
- return out, cfn
-
- def sample_known(self, img, batch_size = 1):
- image_size = self.image_size
- channels = self.channels
- return self.p_sample_loop_known(model,(batch_size, channels, image_size, image_size), img)
-
-
- def p_sample_loop(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=True,
- ):
- """
- Generate samples from the model.
-
- :param model: the model module.
- :param shape: the shape of the samples, (N, C, H, W).
- :param noise: if specified, the noise from the encoder to sample.
- Should be of the same shape as `shape`.
- :param clip_denoised: if True, clip x_start predictions to [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample.
- :param cond_fn: if not None, this is a gradient function that acts
- similarly to the model.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :param device: if specified, the device to create the samples on.
- If not specified, use a model parameter's device.
- :param progress: if True, show a tqdm progress bar.
- :return: a non-differentiable batch of samples.
- """
- final = None
- for sample in self.p_sample_loop_progressive(
- model,
- shape,
- noise=noise,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- ):
- final = sample
- return final["sample"]
-
- def p_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- ):
- """
- Sample x_{t-1} from the model at the given timestep.
- :param model: the model to sample from.
- :param x: the current tensor at x_{t-1}.
- :param t: the value of t, starting at 0 for the first diffusion step.
- :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample.
- :param cond_fn: if not None, this is a gradient function that acts
- similarly to the model.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :return: a dict containing the following keys:
- - 'sample': a random sample from the model.
- - 'pred_xstart': a prediction of x_0.
- """
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- noise = th.randn_like(x)
- nonzero_mask = (
- (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
- ) # no noise when t == 0
- if cond_fn is not None:
- out["mean"] = self.condition_mean(
- cond_fn, out, x, t, model_kwargs=model_kwargs
- )
- sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
- return {"sample": sample, "pred_xstart": out["pred_xstart"]}
-
- def p_sample_loop_known(
- self,
- model,
- shape,
- img,
- org=None,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- noise_level=500,
- progress=False,
- classifier=None
- ):
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
-
-
- t = th.randint(499,500, (b,), device=device).long().to(device)
-
- org=img[0].to(device)
- img=img[0].to(device)
- indices = list(range(t))[::-1]
- noise = th.randn_like(img[:, :4, ...]).to(device)
- x_noisy = self.q_sample(x_start=img[:, :4, ...], t=t, noise=noise).to(device)
- x_noisy = torch.cat((x_noisy, img[:, 4:, ...]), dim=1)
-
-
- for sample in self.p_sample_loop_progressive(
- model,
- shape,
- time=noise_level,
- noise=x_noisy,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- org=org,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- classifier=classifier
- ):
- final = sample
-
- return final["sample"], x_noisy, img
-
- def p_sample_loop_interpolation(
- self,
- model,
- shape,
- img1,
- img2,
- lambdaint,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- ):
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
- t = th.randint(299,300, (b,), device=device).long().to(device)
- img1=torch.tensor(img1).to(device)
- img2 = torch.tensor(img2).to(device)
- noise = th.randn_like(img1).to(device)
- x_noisy1 = self.q_sample(x_start=img1, t=t, noise=noise).to(device)
- x_noisy2 = self.q_sample(x_start=img2, t=t, noise=noise).to(device)
- interpol=lambdaint*x_noisy1+(1-lambdaint)*x_noisy2
- for sample in self.p_sample_loop_progressive(
- model,
- shape,
- time=t,
- noise=interpol,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- ):
- final = sample
- return final["sample"], interpol, img1, img2
-
-
- def p_sample_loop_progressive(
- self,
- model,
- shape,
- time=1000,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=True,
- ):
- """
- Generate samples from the model and yield intermediate samples from
- each timestep of diffusion.
-
- Arguments are the same as p_sample_loop().
- Returns a generator over dicts, where each dict is the return value of
- p_sample().
- """
-
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- if noise is not None:
- img = noise
- else:
- img = th.randn(*shape, device=device)
-
- indices = list(range(time))[::-1]
- if progress:
- # Lazy import so that we don't depend on tqdm.
- from tqdm.auto import tqdm
- indices = tqdm(indices)
-
- for i in indices:
- t = th.tensor([i] * shape[0], device=device)
- with th.no_grad():
- out = self.p_sample(
- model,
- img,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- )
- yield out
- img = out["sample"]
-
- def ddim_sample(
- self,
- model,
- x,
- t, # index of current step
- t_cpu=None,
- t_prev=None, # index of step that we are going to compute, only used for heun
- t_prev_cpu=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- eta=0.0,
- sampling_steps=0,
- ):
- """
- Sample x_{t-1} from the model using DDIM.
- Same usage as p_sample().
- """
- relerr = lambda x, y: (x-y).abs().sum() / y.abs().sum()
- if cond_fn is not None:
- out, saliency = self.condition_score2(cond_fn, out, x, t, model_kwargs=model_kwargs)
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- eps_orig = self._predict_eps_from_xstart(x_t=x, t=t, pred_xstart=out["pred_xstart"])
- if self.mode == 'default':
- shape = x.shape
- elif self.mode == 'segmentation':
- shape = eps_orig.shape
- else:
- raise NotImplementedError(f'mode "{self.mode}" not implemented')
-
- if not sampling_steps:
- alpha_bar_orig = _extract_into_tensor(self.alphas_cumprod, t, shape)
- alpha_bar_prev_orig = _extract_into_tensor(self.alphas_cumprod_prev, t, shape)
- else:
- xp = np.arange(0, 1000, 1, dtype=np.float)
- alpha_cumprod_fun = interp1d(xp, self.alphas_cumprod,
- bounds_error=False,
- fill_value=(self.alphas_cumprod[0], self.alphas_cumprod[-1]),
- )
- alpha_bar_orig = alpha_cumprod_fun(t_cpu).item()
- alpha_bar_prev_orig = alpha_cumprod_fun(t_prev_cpu).item()
- sigma = (
- eta
- * ((1 - alpha_bar_prev_orig) / (1 - alpha_bar_orig))**.5
- * (1 - alpha_bar_orig / alpha_bar_prev_orig)**.5
- )
- noise = th.randn(size=shape, device=x.device)
- mean_pred = (
- out["pred_xstart"] * alpha_bar_prev_orig**.5
- + (1 - alpha_bar_prev_orig - sigma ** 2)**.5 * eps_orig
- )
- nonzero_mask = (
- (t != 0).float().view(-1, *([1] * (len(shape) - 1)))
- )
- sample = mean_pred + nonzero_mask * sigma * noise
- return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
-
-
- def ddim_reverse_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- model_kwargs=None,
- eta=0.0,
- ):
- """
- Sample x_{t+1} from the model using DDIM reverse ODE.
- """
- assert eta == 0.0, "Reverse ODE only for deterministic path"
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- # Usually our model outputs epsilon, but we re-derive it
- # in case we used x_start or x_prev prediction.
- eps = (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- - out["pred_xstart"]
- ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
- alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
-
- # Equation 12. reversed
- mean_pred = (
- out["pred_xstart"] * th.sqrt(alpha_bar_next)
- + th.sqrt(1 - alpha_bar_next) * eps
- )
-
- return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
-
-
-
- def ddim_sample_loop_interpolation(
- self,
- model,
- shape,
- img1,
- img2,
- lambdaint,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- ):
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
- t = th.randint(199,200, (b,), device=device).long().to(device)
- img1=torch.tensor(img1).to(device)
- img2 = torch.tensor(img2).to(device)
- noise = th.randn_like(img1).to(device)
- x_noisy1 = self.q_sample(x_start=img1, t=t, noise=noise).to(device)
- x_noisy2 = self.q_sample(x_start=img2, t=t, noise=noise).to(device)
- interpol=lambdaint*x_noisy1+(1-lambdaint)*x_noisy2
- for sample in self.ddim_sample_loop_progressive(
- model,
- shape,
- time=t,
- noise=interpol,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- ):
- final = sample
- return final["sample"], interpol, img1, img2
-
- def ddim_sample_loop(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- eta=0.0,
- sampling_steps=0,
- ):
- """
- Generate samples from the model using DDIM.
-
- Same usage as p_sample_loop().
- """
- final = None
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
- #t = th.randint(0,1, (b,), device=device).long().to(device)
- t = 1000
- for sample in self.ddim_sample_loop_progressive(
- model,
- shape,
- time=t,
- noise=noise,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- eta=eta,
- sampling_steps=sampling_steps,
- ):
-
- final = sample
- return final["sample"]
-
-
-
- def ddim_sample_loop_known(
- self,
- model,
- shape,
- img,
- mode='default',
- org=None,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- noise_level=1000, # must be same as in training
- progress=False,
- conditioning=False,
- conditioner=None,
- classifier=None,
- eta=0.0,
- sampling_steps=0,
- ):
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- b = shape[0]
- t = th.randint(0,1, (b,), device=device).long().to(device)
- img = img.to(device)
-
- indices = list(range(t))[::-1]
- if mode == 'segmentation':
- noise = None
- x_noisy = None
- elif mode == 'default':
- noise = None
- x_noisy = None
- else:
- raise NotImplementedError(f'mode "{mode}" not implemented')
-
- final = None
- # pass images to be segmented as condition
- for sample in self.ddim_sample_loop_progressive(
- model,
- shape,
- segmentation_img=img, # image to be segmented
- time=noise_level,
- noise=x_noisy,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- eta=eta,
- sampling_steps=sampling_steps,
- ):
- final = sample
-
- return final["sample"], x_noisy, img
-
-
- def ddim_sample_loop_progressive(
- self,
- model,
- shape,
- segmentation_img=None, # define to perform segmentation
- time=1000,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- eta=0.0,
- sampling_steps=0,
- ):
- """
- Use DDIM to sample from the model and yield intermediate samples from
- each timestep of DDIM.
-
- Same usage as p_sample_loop_progressive().
- """
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- if noise is not None:
- img = noise
- else:
- if segmentation_img is None: # normal sampling
- img = th.randn(*shape, device=device)
- else: # segmentation mode
- label_shape = (segmentation_img.shape[0], model.out_channels, *segmentation_img.shape[2:])
- img = th.randn(label_shape, dtype=segmentation_img.dtype, device=segmentation_img.device)
-
- indices = list(range(time))[::-1] # klappt nur für batch_size == 1
-
-
- if sampling_steps:
- tmp = np.linspace(999, 0, sampling_steps)
- tmp = np.append(tmp, -tmp[-2])
- indices = tmp[:-1].round().astype(np.int)
- indices_prev = tmp[1:].round().astype(np.int)
- else:
- indices_prev = [i-1 for i in indices]
-
- if True: #progress:
- # Lazy import so that we don't depend on tqdm.
- from tqdm.auto import tqdm
-
- indices = tqdm(indices)
-
- for i, i_prev in zip(indices, indices_prev): # 1000 -> 0
- if segmentation_img is not None:
- prev_img = img
- img = th.cat((segmentation_img, img), dim=1)
- t = th.tensor([i] * shape[0], device=device)
- t_prev = th.tensor([i_prev] * shape[0], device=device)
- with th.no_grad():
- out = self.ddim_sample(
- model,
- img,
- t,
- t_cpu=i,
- t_prev=t_prev,
- t_prev_cpu=i_prev,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- eta=eta,
- sampling_steps=sampling_steps,
- )
- yield out
- img = out["sample"]
-
- def _vb_terms_bpd(
- self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
- ):
- """
- Get a term for the variational lower-bound.
-
- The resulting units are bits (rather than nats, as one might expect).
- This allows for comparison to other papers.
-
- :return: a dict with the following keys:
- - 'output': a shape [N] tensor of NLLs or KLs.
- - 'pred_xstart': the x_0 predictions.
- """
- true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
- x_start=x_start, x_t=x_t, t=t
- )
- out = self.p_mean_variance(
- model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
- )
- kl = normal_kl(
- true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
- )
- kl = mean_flat(kl) / np.log(2.0)
-
- decoder_nll = -discretized_gaussian_log_likelihood(
- x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
- )
- assert decoder_nll.shape == x_start.shape
- decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
-
- # At the first timestep return the decoder NLL,
- # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
- output = th.where((t == 0), decoder_nll, kl)
- return {"output": output, "pred_xstart": out["pred_xstart"]}
-
- def training_losses(self, model, x_start, t, classifier=None, model_kwargs=None, noise=None, labels=None,
- mode='default'):
- """
- Compute training losses for a single timestep.
- :param model: the model to evaluate loss on.
- :param x_start: the [N x C x ...] tensor of inputs - original image resolution.
- :param t: a batch of timestep indices.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :param noise: if specified, the specific Gaussian noise to try to remove.
- :param labels: must be specified for mode='segmentation'
- :param mode: can be default (image generation), segmentation
- :return: a dict with the key "loss" containing a tensor of shape [N].
- Some mean or variance settings may also have other keys.
- """
- if model_kwargs is None:
- model_kwargs = {}
-
- # Wavelet transform the input image
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(x_start)
- x_start_dwt = th.cat([LLL / 3., LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1)
-
- if mode == 'default':
- noise = th.randn_like(x_start) # Sample noise - original image resolution.
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = dwt(noise)
- noise_dwt = th.cat([LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH], dim=1) # Wavelet transformed noise
- x_t = self.q_sample(x_start_dwt, t, noise=noise_dwt) # Sample x_t
-
- else:
- raise ValueError(f'Invalid mode {mode=}, needs to be "default"')
-
- model_output = model(x_t, self._scale_timesteps(t), **model_kwargs) # Model outputs denoised wavelet subbands
-
- # Inverse wavelet transform the model output
- B, _, H, W, D = model_output.size()
- model_output_idwt = idwt(model_output[:, 0, :, :, :].view(B, 1, H, W, D) * 3.,
- model_output[:, 1, :, :, :].view(B, 1, H, W, D),
- model_output[:, 2, :, :, :].view(B, 1, H, W, D),
- model_output[:, 3, :, :, :].view(B, 1, H, W, D),
- model_output[:, 4, :, :, :].view(B, 1, H, W, D),
- model_output[:, 5, :, :, :].view(B, 1, H, W, D),
- model_output[:, 6, :, :, :].view(B, 1, H, W, D),
- model_output[:, 7, :, :, :].view(B, 1, H, W, D))
-
- terms = {"mse_wav": th.mean(mean_flat((x_start_dwt - model_output) ** 2), dim=0)}
-
- return terms, model_output, model_output_idwt
-
-
- def _prior_bpd(self, x_start):
- """
- Get the prior KL term for the variational lower-bound, measured in
- bits-per-dim.
-
- This term can't be optimized, as it only depends on the encoder.
-
- :param x_start: the [N x C x ...] tensor of inputs.
- :return: a batch of [N] KL values (in bits), one per batch element.
- """
- batch_size = x_start.shape[0]
- t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
- kl_prior = normal_kl(
- mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
- )
- return mean_flat(kl_prior) / np.log(2.0)
-
- def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
- """
- Compute the entire variational lower-bound, measured in bits-per-dim,
- as well as other related quantities.
-
- :param model: the model to evaluate loss on.
- :param x_start: the [N x C x ...] tensor of inputs.
- :param clip_denoised: if True, clip denoised samples.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
-
- :return: a dict containing the following keys:
- - total_bpd: the total variational lower-bound, per batch element.
- - prior_bpd: the prior term in the lower-bound.
- - vb: an [N x T] tensor of terms in the lower-bound.
- - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- - mse: an [N x T] tensor of epsilon MSEs for each timestep.
- """
- device = x_start.device
- batch_size = x_start.shape[0]
-
- vb = []
- xstart_mse = []
- mse = []
- for t in list(range(self.num_timesteps))[::-1]:
- t_batch = th.tensor([t] * batch_size, device=device)
- noise = th.randn_like(x_start)
- x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
-
- # Calculate VLB term at the current timestep
- with th.no_grad():
- out = self._vb_terms_bptimestepsd(
- model,
- x_start=x_start,
- x_t=x_t,
- t=t_batch,
- clip_denoised=clip_denoised,
- model_kwargs=model_kwargs,
- )
- vb.append(out["output"])
- xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
- eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
- mse.append(mean_flat((eps - noise) ** 2))
-
- vb = th.stack(vb, dim=1)
- xstart_mse = th.stack(xstart_mse, dim=1)
- mse = th.stack(mse, dim=1)
-
- prior_bpd = self._prior_bpd(x_start)
- total_bpd = vb.sum(dim=1) + prior_bpd
- return {
- "total_bpd": total_bpd,
- "prior_bpd": prior_bpd,
- "vb": vb,
- "xstart_mse": xstart_mse,
- "mse": mse,
- }
-
-
-def _extract_into_tensor(arr, timesteps, broadcast_shape):
- """
- Extract values from a 1-D numpy array for a batch of indices.
-
- :param arr: the 1-D numpy array.
- :param timesteps: a tensor of indices into the array to extract.
- :param broadcast_shape: a larger shape of K dimensions with the batch
- dimension equal to the length of timesteps.
- :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
- """
- res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
- while len(res.shape) < len(broadcast_shape):
- res = res[..., None]
- return res.expand(broadcast_shape)
diff --git a/wdm-3d-initial/guided_diffusion/inpaintloader.py b/wdm-3d-initial/guided_diffusion/inpaintloader.py
deleted file mode 100644
index db8e01e8ae99966471431e1fbef9039781688bce..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/inpaintloader.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import os, nibabel, torch, numpy as np
-import torch.nn as nn
-from torch.utils.data import Dataset
-import pandas as pd
-import re
-
-
-class InpaintVolumes(Dataset):
- """
- Y : float32 [C, D, H, W] full multi-modal MRI stack
- M : float32 [1, D, H, W] binary in-painting mask (shared by all mods)
- Y_void = Y * (1 - M) context with lesion region blanked
- name : identifier string
- """
-
- # ------------------------------------------------------------
- def __init__(self,
- root_dir: str,
- subset: str = 'train', # 'train' | 'val'
- img_size: int = 256, # 128 or 256 cube
- modalities: tuple = ('T1w',), # order defines channel order
- normalize=None):
- super().__init__()
- self.root_dir = os.path.expanduser(root_dir)
- self.subset = subset
- self.img_size = img_size
- self.modalities = modalities
- self.normalize = normalize or (lambda x: x)
- self.cases = self._index_cases() # ⇒ list[dict]
-
- # ------------------------------------------------------------
- def _index_cases(self):
- """
- Build a list like:
- {'img': {'T1w': path, 'FLAIR': path, ...},
- 'mask': path,
- 'name': case_id}
- Edit only this block to suit your folder / filename scheme.
- """
- cases = []
-
- # metadata
- df = pd.read_csv(f"{self.root_dir}/participants.tsv", sep="\t")
- # update with new splits
-
- # filter FCD samples
- fcd_df = df[df['group'] == 'fcd'].copy()
- # shuffle indices
- fcd_df = fcd_df.sample(frac=1, random_state=42).reset_index(drop=True)
- # compute split index
- n_train = int(len(fcd_df) * 0.9)
- # assign split labels
- fcd_df.loc[:n_train-1, 'split'] = 'train'
- fcd_df.loc[n_train:, 'split'] = 'val'
- #update
- df.loc[fcd_df.index, 'split'] = fcd_df['split']
-
- missing = []
-
- for participant_id in df[(df['split']==self.subset) & (df['group']=='fcd')]['participant_id']:
- case_dir = f"{self.root_dir}/{participant_id}/anat/"
- files = os.listdir(case_dir)
-
- img_dict = {}
- for mod in self.modalities:
- pattern = re.compile(rf"^{re.escape(participant_id)}.*{re.escape(mod)}\.nii\.gz$")
- matches = [f for f in files if pattern.match(f)]
- assert matches, f"Missing {mod} for {participant_id} in {case_dir}"
- img_dict[mod] = os.path.join(case_dir, matches[0])
-
- mask_matches = [f for f in files if re.match(rf"^{re.escape(participant_id)}.*roi\.nii\.gz$", f)]
- mask_path = os.path.join(case_dir, mask_matches[0])
-
- cases.append({'img': img_dict, 'mask': mask_path, 'name': participant_id})
-
- return cases
-
- # ------------------------------------------------------------
- def _pad_to_cube(self, vol, fill=0.0):
- """Symmetric 3-D pad to [img_size³]. `vol` is [*, D, H, W]."""
- D, H, W = vol.shape[-3:]
- pad_D, pad_H, pad_W = self.img_size - D, self.img_size - H, self.img_size - W
- pad = (pad_W // 2, pad_W - pad_W // 2,
- pad_H // 2, pad_H - pad_H // 2,
- pad_D // 2, pad_D - pad_D // 2)
- return nn.functional.pad(vol, pad, value=fill)
-
- # ------------------------------------------------------------
- def __getitem__(self, idx):
- rec = self.cases[idx]
- name = rec['name']
-
- # ---------- load C modalities --------------------------
- vols = []
- for mod in self.modalities:
- mod_path = rec['img'][mod]
- arr = nibabel.load(mod_path).get_fdata().astype(np.float32)
-
- # robust min-max clipping and normalization
- lo, hi = np.quantile(arr, [0.001, 0.999])
- arr = np.clip(arr, lo, hi)
- arr = (arr - lo) / (hi - lo + 1e-6)
-
- vols.append(torch.from_numpy(arr))
-
- first_mod = self.modalities[0]
- nii_obj = nibabel.load(rec['img'][first_mod])
- affine = nii_obj.affine
-
- Y = torch.stack(vols, dim=0) # [C, D, H, W]
-
- # ---------- load mask ----------------------------------
- M_arr = nibabel.load(rec['mask']).get_fdata().astype(np.uint8)
- M = torch.from_numpy(M_arr).unsqueeze(0) # [1, D, H, W]
- M = (M > 0).to(Y.dtype)
-
- # ---------- pad (and optional downsample) --------------
- Y = self._pad_to_cube(Y, fill=0.0)
- M = self._pad_to_cube(M, fill=0.0)
- if self.img_size == 128:
- pool = nn.AvgPool3d(2, 2)
- Y = pool(Y); M = pool(M)
-
- # ---------- derive context image -----------------------
- Y_void = Y * (1 - M)
-
- return Y, M, Y_void, name, affine # shapes: [C, D, H, W], [1, D, H, W], [C, D, H, W], ...
-
- # ------------------------------------------------------------
- def __len__(self):
- return len(self.cases)
\ No newline at end of file
diff --git a/wdm-3d-initial/guided_diffusion/lidcloader.py b/wdm-3d-initial/guided_diffusion/lidcloader.py
deleted file mode 100644
index 294278919e92bb8df59cc1b4744505c2164cd37f..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/lidcloader.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.utils.data
-import os
-import os.path
-import nibabel
-
-
-class LIDCVolumes(torch.utils.data.Dataset):
- def __init__(self, directory, test_flag=False, normalize=None, mode='train', img_size=256):
- '''
- directory is expected to contain some folder structure:
- if some subfolder contains only files, all of these
- files are assumed to have the name: processed.nii.gz
- '''
- super().__init__()
- self.mode = mode
- self.directory = os.path.expanduser(directory)
- self.normalize = normalize or (lambda x: x)
- self.test_flag = test_flag
- self.img_size = img_size
- self.database = []
-
- if not self.mode == 'fake':
- for root, dirs, files in os.walk(self.directory):
- # if there are no subdirs, we have a datadir
- if not dirs:
- files.sort()
- datapoint = dict()
- # extract all files as channels
- for f in files:
- datapoint['image'] = os.path.join(root, f)
- if len(datapoint) != 0:
- self.database.append(datapoint)
- else:
- for root, dirs, files in os.walk(self.directory):
- for f in files:
- datapoint = dict()
- datapoint['image'] = os.path.join(root, f)
- self.database.append(datapoint)
-
- def __getitem__(self, x):
- filedict = self.database[x]
- name = filedict['image']
- nib_img = nibabel.load(name)
- out = nib_img.get_fdata()
-
- if not self.mode == 'fake':
- out = torch.Tensor(out)
-
- image = torch.zeros(1, 256, 256, 256)
- image[:, :, :, :] = out
-
- if self.img_size == 128:
- downsample = nn.AvgPool3d(kernel_size=2, stride=2)
- image = downsample(image)
- else:
- image = torch.tensor(out, dtype=torch.float32)
- image = image.unsqueeze(dim=0)
-
- # normalization
- image = self.normalize(image)
-
- if self.mode == 'fake':
- return image, name
- else:
- return image
-
- def __len__(self):
- return len(self.database)
diff --git a/wdm-3d-initial/guided_diffusion/logger.py b/wdm-3d-initial/guided_diffusion/logger.py
deleted file mode 100644
index 880e9b881716a811cc657a27ac498ea7f0ea83dd..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/logger.py
+++ /dev/null
@@ -1,495 +0,0 @@
-"""
-Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
-https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
-"""
-
-import os
-import sys
-import shutil
-import os.path as osp
-import json
-import time
-import datetime
-import tempfile
-import warnings
-from collections import defaultdict
-from contextlib import contextmanager
-
-DEBUG = 10
-INFO = 20
-WARN = 30
-ERROR = 40
-
-DISABLED = 50
-
-
-class KVWriter(object):
- def writekvs(self, kvs):
- raise NotImplementedError
-
-
-class SeqWriter(object):
- def writeseq(self, seq):
- raise NotImplementedError
-
-
-class HumanOutputFormat(KVWriter, SeqWriter):
- def __init__(self, filename_or_file):
- if isinstance(filename_or_file, str):
- self.file = open(filename_or_file, "wt")
- self.own_file = True
- else:
- assert hasattr(filename_or_file, "read"), (
- "expected file or str, got %s" % filename_or_file
- )
- self.file = filename_or_file
- self.own_file = False
-
- def writekvs(self, kvs):
- # Create strings for printing
- key2str = {}
- for (key, val) in sorted(kvs.items()):
- if hasattr(val, "__float__"):
- valstr = "%-8.3g" % val
- else:
- valstr = str(val)
- key2str[self._truncate(key)] = self._truncate(valstr)
-
- # Find max widths
- if len(key2str) == 0:
- print("WARNING: tried to write empty key-value dict")
- return
- else:
- keywidth = max(map(len, key2str.keys()))
- valwidth = max(map(len, key2str.values()))
-
- # Write out the data
- dashes = "-" * (keywidth + valwidth + 7)
- lines = [dashes]
- for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
- lines.append(
- "| %s%s | %s%s |"
- % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
- )
- lines.append(dashes)
- self.file.write("\n".join(lines) + "\n")
-
- # Flush the output to the file
- self.file.flush()
-
- def _truncate(self, s):
- maxlen = 30
- return s[: maxlen - 3] + "..." if len(s) > maxlen else s
-
- def writeseq(self, seq):
- seq = list(seq)
- for (i, elem) in enumerate(seq):
- self.file.write(elem)
- if i < len(seq) - 1: # add space unless this is the last one
- self.file.write(" ")
- self.file.write("\n")
- self.file.flush()
-
- def close(self):
- if self.own_file:
- self.file.close()
-
-
-class JSONOutputFormat(KVWriter):
- def __init__(self, filename):
- self.file = open(filename, "wt")
-
- def writekvs(self, kvs):
- for k, v in sorted(kvs.items()):
- if hasattr(v, "dtype"):
- kvs[k] = float(v)
- self.file.write(json.dumps(kvs) + "\n")
- self.file.flush()
-
- def close(self):
- self.file.close()
-
-
-class CSVOutputFormat(KVWriter):
- def __init__(self, filename):
- self.file = open(filename, "w+t")
- self.keys = []
- self.sep = ","
-
- def writekvs(self, kvs):
- # Add our current row to the history
- extra_keys = list(kvs.keys() - self.keys)
- extra_keys.sort()
- if extra_keys:
- self.keys.extend(extra_keys)
- self.file.seek(0)
- lines = self.file.readlines()
- self.file.seek(0)
- for (i, k) in enumerate(self.keys):
- if i > 0:
- self.file.write(",")
- self.file.write(k)
- self.file.write("\n")
- for line in lines[1:]:
- self.file.write(line[:-1])
- self.file.write(self.sep * len(extra_keys))
- self.file.write("\n")
- for (i, k) in enumerate(self.keys):
- if i > 0:
- self.file.write(",")
- v = kvs.get(k)
- if v is not None:
- self.file.write(str(v))
- self.file.write("\n")
- self.file.flush()
-
- def close(self):
- self.file.close()
-
-
-class TensorBoardOutputFormat(KVWriter):
- """
- Dumps key/value pairs into TensorBoard's numeric format.
- """
-
- def __init__(self, dir):
- os.makedirs(dir, exist_ok=True)
- self.dir = dir
- self.step = 1
- prefix = "events"
- path = osp.join(osp.abspath(dir), prefix)
- import tensorflow as tf
- from tensorflow.python import pywrap_tensorflow
- from tensorflow.core.util import event_pb2
- from tensorflow.python.util import compat
-
- self.tf = tf
- self.event_pb2 = event_pb2
- self.pywrap_tensorflow = pywrap_tensorflow
- self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
-
- def writekvs(self, kvs):
- def summary_val(k, v):
- kwargs = {"tag": k, "simple_value": float(v)}
- return self.tf.Summary.Value(**kwargs)
-
- summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
- event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
- event.step = (
- self.step
- ) # is there any reason why you'd want to specify the step?
- self.writer.WriteEvent(event)
- self.writer.Flush()
- self.step += 1
-
- def close(self):
- if self.writer:
- self.writer.Close()
- self.writer = None
-
-
-def make_output_format(format, ev_dir, log_suffix=""):
- os.makedirs(ev_dir, exist_ok=True)
- if format == "stdout":
- return HumanOutputFormat(sys.stdout)
- elif format == "log":
- return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
- elif format == "json":
- return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
- elif format == "csv":
- return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
- elif format == "tensorboard":
- return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
- else:
- raise ValueError("Unknown format specified: %s" % (format,))
-
-
-# ================================================================
-# API
-# ================================================================
-
-
-def logkv(key, val):
- """
- Log a value of some diagnostic
- Call this once for each diagnostic quantity, each iteration
- If called many times, last value will be used.
- """
- get_current().logkv(key, val)
-
-
-def logkv_mean(key, val):
- """
- The same as logkv(), but if called many times, values averaged.
- """
- get_current().logkv_mean(key, val)
-
-
-def logkvs(d):
- """
- Log a dictionary of key-value pairs
- """
- for (k, v) in d.items():
- logkv(k, v)
-
-
-def dumpkvs():
- """
- Write all of the diagnostics from the current iteration
- """
- return get_current().dumpkvs()
-
-
-def getkvs():
- return get_current().name2val
-
-
-def log(*args, level=INFO):
- """
- Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
- """
- get_current().log(*args, level=level)
-
-
-def debug(*args):
- log(*args, level=DEBUG)
-
-
-def info(*args):
- log(*args, level=INFO)
-
-
-def warn(*args):
- log(*args, level=WARN)
-
-
-def error(*args):
- log(*args, level=ERROR)
-
-
-def set_level(level):
- """
- Set logging threshold on current logger.
- """
- get_current().set_level(level)
-
-
-def set_comm(comm):
- get_current().set_comm(comm)
-
-
-def get_dir():
- """
- Get directory that log files are being written to.
- will be None if there is no output directory (i.e., if you didn't call start)
- """
- return get_current().get_dir()
-
-
-record_tabular = logkv
-dump_tabular = dumpkvs
-
-
-@contextmanager
-def profile_kv(scopename):
- logkey = "wait_" + scopename
- tstart = time.time()
- try:
- yield
- finally:
- get_current().name2val[logkey] += time.time() - tstart
-
-
-def profile(n):
- """
- Usage:
- @profile("my_func")
- def my_func(): code
- """
-
- def decorator_with_name(func):
- def func_wrapper(*args, **kwargs):
- with profile_kv(n):
- return func(*args, **kwargs)
-
- return func_wrapper
-
- return decorator_with_name
-
-
-# ================================================================
-# Backend
-# ================================================================
-
-
-def get_current():
- if Logger.CURRENT is None:
- _configure_default_logger()
-
- return Logger.CURRENT
-
-
-class Logger(object):
- DEFAULT = None # A logger with no output files. (See right below class definition)
- # So that you can still log to the terminal without setting up any output files
- CURRENT = None # Current logger being used by the free functions above
-
- def __init__(self, dir, output_formats, comm=None):
- self.name2val = defaultdict(float) # values this iteration
- self.name2cnt = defaultdict(int)
- self.level = INFO
- self.dir = dir
- self.output_formats = output_formats
- self.comm = comm
-
- # Logging API, forwarded
- # ----------------------------------------
- def logkv(self, key, val):
- self.name2val[key] = val
-
- def logkv_mean(self, key, val):
- oldval, cnt = self.name2val[key], self.name2cnt[key]
- self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
- self.name2cnt[key] = cnt + 1
-
- def dumpkvs(self):
- if self.comm is None:
- d = self.name2val
- else:
- d = mpi_weighted_mean(
- self.comm,
- {
- name: (val, self.name2cnt.get(name, 1))
- for (name, val) in self.name2val.items()
- },
- )
- if self.comm.rank != 0:
- d["dummy"] = 1 # so we don't get a warning about empty dict
- out = d.copy() # Return the dict for unit testing purposes
- for fmt in self.output_formats:
- if isinstance(fmt, KVWriter):
- fmt.writekvs(d)
- self.name2val.clear()
- self.name2cnt.clear()
- return out
-
- def log(self, *args, level=INFO):
- if self.level <= level:
- self._do_log(args)
-
- # Configuration
- # ----------------------------------------
- def set_level(self, level):
- self.level = level
-
- def set_comm(self, comm):
- self.comm = comm
-
- def get_dir(self):
- return self.dir
-
- def close(self):
- for fmt in self.output_formats:
- fmt.close()
-
- # Misc
- # ----------------------------------------
- def _do_log(self, args):
- for fmt in self.output_formats:
- if isinstance(fmt, SeqWriter):
- fmt.writeseq(map(str, args))
-
-
-def get_rank_without_mpi_import():
- # check environment variables here instead of importing mpi4py
- # to avoid calling MPI_Init() when this module is imported
- for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
- if varname in os.environ:
- return int(os.environ[varname])
- return 0
-
-
-def mpi_weighted_mean(comm, local_name2valcount):
- """
- Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
- Perform a weighted average over dicts that are each on a different node
- Input: local_name2valcount: dict mapping key -> (value, count)
- Returns: key -> mean
- """
- all_name2valcount = comm.gather(local_name2valcount)
- if comm.rank == 0:
- name2sum = defaultdict(float)
- name2count = defaultdict(float)
- for n2vc in all_name2valcount:
- for (name, (val, count)) in n2vc.items():
- try:
- val = float(val)
- except ValueError:
- if comm.rank == 0:
- warnings.warn(
- "WARNING: tried to compute mean on non-float {}={}".format(
- name, val
- )
- )
- else:
- name2sum[name] += val * count
- name2count[name] += count
- return {name: name2sum[name] / name2count[name] for name in name2sum}
- else:
- return {}
-
-
-def configure(dir='./results', format_strs=None, comm=None, log_suffix=""):
- """
- If comm is provided, average all numerical stats across that comm
- """
- if dir is None:
- dir = os.getenv("OPENAI_LOGDIR")
- if dir is None:
- dir = osp.join(
- tempfile.gettempdir(),
- datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
- )
- assert isinstance(dir, str)
- dir = os.path.expanduser(dir)
- os.makedirs(os.path.expanduser(dir), exist_ok=True)
-
- rank = get_rank_without_mpi_import()
- if rank > 0:
- log_suffix = log_suffix + "-rank%03i" % rank
-
- if format_strs is None:
- if rank == 0:
- format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
- else:
- format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
- format_strs = filter(None, format_strs)
- output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
-
- Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
- if output_formats:
- log("Logging to %s" % dir)
-
-
-def _configure_default_logger():
- configure()
- Logger.DEFAULT = Logger.CURRENT
-
-
-def reset():
- if Logger.CURRENT is not Logger.DEFAULT:
- Logger.CURRENT.close()
- Logger.CURRENT = Logger.DEFAULT
- log("Reset logger")
-
-
-@contextmanager
-def scoped_configure(dir=None, format_strs=None, comm=None):
- prevlogger = Logger.CURRENT
- configure(dir=dir, format_strs=format_strs, comm=comm)
- try:
- yield
- finally:
- Logger.CURRENT.close()
- Logger.CURRENT = prevlogger
-
diff --git a/wdm-3d-initial/guided_diffusion/losses.py b/wdm-3d-initial/guided_diffusion/losses.py
deleted file mode 100644
index 251e42e4f36a31bb5e1aeda874b3a45d722000a2..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/losses.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-Helpers for various likelihood-based losses. These are ported from the original
-Ho et al. diffusion models codebase:
-https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
-"""
-
-import numpy as np
-
-import torch as th
-
-
-def normal_kl(mean1, logvar1, mean2, logvar2):
- """
- Compute the KL divergence between two gaussians.
-
- Shapes are automatically broadcasted, so batches can be compared to
- scalars, among other use cases.
- """
- tensor = None
- for obj in (mean1, logvar1, mean2, logvar2):
- if isinstance(obj, th.Tensor):
- tensor = obj
- break
- assert tensor is not None, "at least one argument must be a Tensor"
-
- # Force variances to be Tensors. Broadcasting helps convert scalars to
- # Tensors, but it does not work for th.exp().
- logvar1, logvar2 = [
- x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
- for x in (logvar1, logvar2)
- ]
-
- return 0.5 * (
- -1.0
- + logvar2
- - logvar1
- + th.exp(logvar1 - logvar2)
- + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
- )
-
-
-def approx_standard_normal_cdf(x):
- """
- A fast approximation of the cumulative distribution function of the
- standard normal.
- """
- return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
-
-
-def discretized_gaussian_log_likelihood(x, *, means, log_scales):
- """
- Compute the log-likelihood of a Gaussian distribution discretizing to a
- given image.
-
- :param x: the target images. It is assumed that this was uint8 values,
- rescaled to the range [-1, 1].
- :param means: the Gaussian mean Tensor.
- :param log_scales: the Gaussian log stddev Tensor.
- :return: a tensor like x of log probabilities (in nats).
- """
- assert x.shape == means.shape == log_scales.shape
- centered_x = x - means
- inv_stdv = th.exp(-log_scales)
- plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
- cdf_plus = approx_standard_normal_cdf(plus_in)
- min_in = inv_stdv * (centered_x - 1.0 / 255.0)
- cdf_min = approx_standard_normal_cdf(min_in)
- log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
- log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
- cdf_delta = cdf_plus - cdf_min
- log_probs = th.where(
- x < -0.999,
- log_cdf_plus,
- th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
- )
- assert log_probs.shape == x.shape
- return log_probs
diff --git a/wdm-3d-initial/guided_diffusion/nn.py b/wdm-3d-initial/guided_diffusion/nn.py
deleted file mode 100644
index 58c287a4acb0d2d6018827130f71214f21cfd96d..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/nn.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""
-Various utilities for neural networks.
-"""
-
-import math
-
-import torch as th
-import torch.nn as nn
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
- def forward(self, x):
- return x * th.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
- def forward(self, x):
- return super().forward(x.float()).type(x.dtype)
-
-
-def conv_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D convolution module.
- """
- if dims == 1:
- return nn.Conv1d(*args, **kwargs)
- elif dims == 2:
- return nn.Conv2d(*args, **kwargs)
- elif dims == 3:
- return nn.Conv3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
- """
- Create a linear module.
- """
- return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D average pooling module.
- """
- if dims == 1:
- return nn.AvgPool1d(*args, **kwargs)
- elif dims == 2:
- return nn.AvgPool2d(*args, **kwargs)
- elif dims == 3:
- return nn.AvgPool3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def update_ema(target_params, source_params, rate=0.99):
- """
- Update target parameters to be closer to those of source parameters using
- an exponential moving average.
-
- :param target_params: the target parameter sequence.
- :param source_params: the source parameter sequence.
- :param rate: the EMA rate (closer to 1 means slower).
- """
- for targ, src in zip(target_params, source_params):
- targ.detach().mul_(rate).add_(src, alpha=1 - rate)
-
-
-def zero_module(module):
- """
- Zero out the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().zero_()
- return module
-
-
-def scale_module(module, scale):
- """
- Scale the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().mul_(scale)
- return module
-
-
-def mean_flat(tensor):
- """
- Take the mean over all non-batch dimensions.
- """
- return tensor.mean(dim=list(range(2, len(tensor.shape))))
-
-
-def normalization(channels, groups=32):
- """
- Make a standard normalization layer.
-
- :param channels: number of input channels.
- :return: an nn.Module for normalization.
- """
- return GroupNorm32(groups, channels)
-
-
-def timestep_embedding(timesteps, dim, max_period=10000):
- """
- Create sinusoidal timestep embeddings.
-
- :param timesteps: a 1-D Tensor of N indices, one per batch element.
- These may be fractional.
- :param dim: the dimension of the output.
- :param max_period: controls the minimum frequency of the embeddings.
- :return: an [N x dim] Tensor of positional embeddings.
- """
- half = dim // 2
- freqs = th.exp(
- -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
- ).to(device=timesteps.device)
- args = timesteps[:, None].float() * freqs[None]
- embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
- if dim % 2:
- embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
- return embedding
-
-
-def checkpoint(func, inputs, params, flag):
- """
- Evaluate a function without caching intermediate activations, allowing for
- reduced memory at the expense of extra compute in the backward pass.
-
- :param func: the function to evaluate.
- :param inputs: the argument sequence to pass to `func`.
- :param params: a sequence of parameters `func` depends on but does not
- explicitly take as arguments.
- :param flag: if False, disable gradient checkpointing.
- """
- if flag:
- args = tuple(inputs) + tuple(params)
- return CheckpointFunction.apply(func, len(inputs), *args)
- else:
- return func(*inputs)
-
-
-class CheckpointFunction(th.autograd.Function):
- @staticmethod
- def forward(ctx, run_function, length, *args):
- ctx.run_function = run_function
- ctx.input_tensors = list(args[:length])
- ctx.input_params = list(args[length:])
- with th.no_grad():
- output_tensors = ctx.run_function(*ctx.input_tensors)
- return output_tensors
-
- @staticmethod
- def backward(ctx, *output_grads):
- ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
- with th.enable_grad():
- # Fixes a bug where the first op in run_function modifies the
- # Tensor storage in place, which is not allowed for detach()'d
- # Tensors.
- shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
- output_tensors = ctx.run_function(*shallow_copies)
- input_grads = th.autograd.grad(
- output_tensors,
- ctx.input_tensors + ctx.input_params,
- output_grads,
- allow_unused=True,
- )
- del ctx.input_tensors
- del ctx.input_params
- del output_tensors
- return (None, None) + input_grads
diff --git a/wdm-3d-initial/guided_diffusion/resample.py b/wdm-3d-initial/guided_diffusion/resample.py
deleted file mode 100644
index edbeef26f7eec6dbe0158c1a08b404d7de9c5416..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/resample.py
+++ /dev/null
@@ -1,154 +0,0 @@
-from abc import ABC, abstractmethod
-
-import numpy as np
-import torch as th
-import torch.distributed as dist
-
-
-def create_named_schedule_sampler(name, diffusion, maxt):
- """
- Create a ScheduleSampler from a library of pre-defined samplers.
-
- :param name: the name of the sampler.
- :param diffusion: the diffusion object to sample for.
- """
- if name == "uniform":
- return UniformSampler(diffusion, maxt)
- elif name == "loss-second-moment":
- return LossSecondMomentResampler(diffusion)
- else:
- raise NotImplementedError(f"unknown schedule sampler: {name}")
-
-
-class ScheduleSampler(ABC):
- """
- A distribution over timesteps in the diffusion process, intended to reduce
- variance of the objective.
-
- By default, samplers perform unbiased importance sampling, in which the
- objective's mean is unchanged.
- However, subclasses may override sample() to change how the resampled
- terms are reweighted, allowing for actual changes in the objective.
- """
-
- @abstractmethod
- def weights(self):
- """
- Get a numpy array of weights, one per diffusion step.
-
- The weights needn't be normalized, but must be positive.
- """
-
- def sample(self, batch_size, device):
- """
- Importance-sample timesteps for a batch.
-
- :param batch_size: the number of timesteps.
- :param device: the torch device to save to.
- :return: a tuple (timesteps, weights):
- - timesteps: a tensor of timestep indices.
- - weights: a tensor of weights to scale the resulting losses.
- """
- w = self.weights()
- p = w / np.sum(w)
- indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
- indices = th.from_numpy(indices_np).long().to(device)
- weights_np = 1 / (len(p) * p[indices_np])
- weights = th.from_numpy(weights_np).float().to(device)
- return indices, weights
-
-
-class UniformSampler(ScheduleSampler):
- def __init__(self, diffusion, maxt):
- self.diffusion = diffusion
- self._weights = np.ones([maxt])
-
- def weights(self):
- return self._weights
-
-
-class LossAwareSampler(ScheduleSampler):
- def update_with_local_losses(self, local_ts, local_losses):
- """
- Update the reweighting using losses from a model.
-
- Call this method from each rank with a batch of timesteps and the
- corresponding losses for each of those timesteps.
- This method will perform synchronization to make sure all of the ranks
- maintain the exact same reweighting.
-
- :param local_ts: an integer Tensor of timesteps.
- :param local_losses: a 1D Tensor of losses.
- """
- batch_sizes = [
- th.tensor([0], dtype=th.int32, device=local_ts.device)
- for _ in range(dist.get_world_size())
- ]
- dist.all_gather(
- batch_sizes,
- th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
- )
-
- # Pad all_gather batches to be the maximum batch size.
- batch_sizes = [x.item() for x in batch_sizes]
- max_bs = max(batch_sizes)
-
- timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
- loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
- dist.all_gather(timestep_batches, local_ts)
- dist.all_gather(loss_batches, local_losses)
- timesteps = [
- x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
- ]
- losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
- self.update_with_all_losses(timesteps, losses)
-
- @abstractmethod
- def update_with_all_losses(self, ts, losses):
- """
- Update the reweighting using losses from a model.
-
- Sub-classes should override this method to update the reweighting
- using losses from the model.
-
- This method directly updates the reweighting without synchronizing
- between workers. It is called by update_with_local_losses from all
- ranks with identical arguments. Thus, it should have deterministic
- behavior to maintain state across workers.
-
- :param ts: a list of int timesteps.
- :param losses: a list of float losses, one per timestep.
- """
-
-
-class LossSecondMomentResampler(LossAwareSampler):
- def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
- self.diffusion = diffusion
- self.history_per_term = history_per_term
- self.uniform_prob = uniform_prob
- self._loss_history = np.zeros(
- [diffusion.num_timesteps, history_per_term], dtype=np.float64
- )
- self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int)
-
- def weights(self):
- if not self._warmed_up():
- return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
- weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
- weights /= np.sum(weights)
- weights *= 1 - self.uniform_prob
- weights += self.uniform_prob / len(weights)
- return weights
-
- def update_with_all_losses(self, ts, losses):
- for t, loss in zip(ts, losses):
- if self._loss_counts[t] == self.history_per_term:
- # Shift out the oldest loss term.
- self._loss_history[t, :-1] = self._loss_history[t, 1:]
- self._loss_history[t, -1] = loss
- else:
- self._loss_history[t, self._loss_counts[t]] = loss
- self._loss_counts[t] += 1
-
- def _warmed_up(self):
- return (self._loss_counts == self.history_per_term).all()
diff --git a/wdm-3d-initial/guided_diffusion/respace.py b/wdm-3d-initial/guided_diffusion/respace.py
deleted file mode 100644
index dc2967fa44871275c02063525259929ec6999e8e..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/respace.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import numpy as np
-import torch as th
-
-from .gaussian_diffusion import GaussianDiffusion
-
-
-def space_timesteps(num_timesteps, section_counts):
- """
- Create a list of timesteps to use from an original diffusion process,
- given the number of timesteps we want to take from equally-sized portions
- of the original process.
-
- For example, if there's 300 timesteps and the section counts are [10,15,20]
- then the first 100 timesteps are strided to be 10 timesteps, the second 100
- are strided to be 15 timesteps, and the final 100 are strided to be 20.
-
- If the stride is a string starting with "ddim", then the fixed striding
- from the DDIM paper is used, and only one section is allowed.
-
- :param num_timesteps: the number of diffusion steps in the original
- process to divide up.
- :param section_counts: either a list of numbers, or a string containing
- comma-separated numbers, indicating the step count
- per section. As a special case, use "ddimN" where N
- is a number of steps to use the striding from the
- DDIM paper.
- :return: a set of diffusion steps from the original process to use.
- """
- if isinstance(section_counts, str):
- if section_counts.startswith("ddim"):
- desired_count = int(section_counts[len("ddim") :])
- print('desired_cound', desired_count )
- for i in range(1, num_timesteps):
- if len(range(0, num_timesteps, i)) == desired_count:
- return set(range(0, num_timesteps, i))
- raise ValueError(
- f"cannot create exactly {num_timesteps} steps with an integer stride"
- )
- section_counts = [int(x) for x in section_counts.split(",")]
- # print('sectioncount', section_counts)
- size_per = num_timesteps // len(section_counts)
- extra = num_timesteps % len(section_counts)
- start_idx = 0
- all_steps = []
- for i, section_count in enumerate(section_counts):
- size = size_per + (1 if i < extra else 0)
- if size < section_count:
- raise ValueError(
- f"cannot divide section of {size} steps into {section_count}"
- )
- if section_count <= 1:
- frac_stride = 1
- else:
- frac_stride = (size - 1) / (section_count - 1)
- cur_idx = 0.0
- taken_steps = []
- for _ in range(section_count):
- taken_steps.append(start_idx + round(cur_idx))
- cur_idx += frac_stride
- all_steps += taken_steps
- start_idx += size
- return set(all_steps)
-
-
-class SpacedDiffusion(GaussianDiffusion):
- """
- A diffusion process which can skip steps in a base diffusion process.
-
- :param use_timesteps: a collection (sequence or set) of timesteps from the
- original diffusion process to retain.
- :param kwargs: the kwargs to create the base diffusion process.
- """
-
- def __init__(self, use_timesteps, **kwargs):
- self.use_timesteps = set(use_timesteps)
- self.timestep_map = []
- self.original_num_steps = len(kwargs["betas"])
-
- base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
- last_alpha_cumprod = 1.0
- new_betas = []
- for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
- if i in self.use_timesteps:
- new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
- last_alpha_cumprod = alpha_cumprod
- self.timestep_map.append(i)
- kwargs["betas"] = np.array(new_betas)
- super().__init__(**kwargs)
-
- def p_mean_variance(
- self, model, *args, **kwargs
- ): # pylint: disable=signature-differs
- return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
-
- def training_losses(
- self, model, *args, **kwargs
- ): # pylint: disable=signature-differs
- return super().training_losses(self._wrap_model(model), *args, **kwargs)
-
- def condition_mean(self, cond_fn, *args, **kwargs):
- return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
-
- def condition_score(self, cond_fn, *args, **kwargs):
- return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
-
- def _wrap_model(self, model):
- if isinstance(model, _WrappedModel):
- return model
- return _WrappedModel(
- model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
- )
-
-
- def _scale_timesteps(self, t):
- # Scaling is done by the wrapped model.
- return t
-
-
-class _WrappedModel:
- def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
- self.model = model
- self.timestep_map = timestep_map
- self.rescale_timesteps = rescale_timesteps
- self.original_num_steps = original_num_steps
-
-
- def __call__(self, x, ts, **kwargs):
- map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
- new_ts = map_tensor[ts]
- if self.rescale_timesteps:
- new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
- return self.model(x, new_ts, **kwargs)
-
-
-
diff --git a/wdm-3d-initial/guided_diffusion/script_util.py b/wdm-3d-initial/guided_diffusion/script_util.py
deleted file mode 100644
index 50902ee39976661dcdbb3bbe200f84e2f28129e1..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/script_util.py
+++ /dev/null
@@ -1,574 +0,0 @@
-import argparse
-import inspect
-
-from . import gaussian_diffusion as gd
-from .respace import SpacedDiffusion, space_timesteps
-from .unet import SuperResModel, UNetModel, EncoderUNetModel
-from .wunet import WavUNetModel
-
-NUM_CLASSES = 2
-
-
-def diffusion_defaults():
- """
- Defaults for image and classifier training.
- """
- return dict(
- learn_sigma=False,
- diffusion_steps=1000,
- noise_schedule="linear",
- timestep_respacing="",
- use_kl=False,
- predict_xstart=False,
- rescale_timesteps=False,
- rescale_learned_sigmas=False,
- dataset='brats',
- dims=2,
- num_groups=32,
- in_channels=1,
- )
-
-
-def classifier_defaults():
- """
- Defaults for classifier models.
- """
- return dict(
- image_size=64,
- classifier_use_fp16=False,
- classifier_width=128,
- classifier_depth=2,
- classifier_attention_resolutions="32,16,8", # 16
- classifier_num_head_channels=64,
- classifier_use_scale_shift_norm=True, # False
- classifier_resblock_updown=True, # False
- classifier_pool="spatial",
- classifier_channel_mult="1,1,2,2,4,4",
- dataset='brats'
- )
-
-
-def model_and_diffusion_defaults():
- """
- Defaults for image training.
- """
- res = dict(
- image_size=64,
- num_channels=128,
- num_res_blocks=2,
- num_heads=4,
- num_heads_upsample=-1,
- num_head_channels=-1,
- attention_resolutions="16,8",
- channel_mult="",
- dropout=0.0,
- class_cond=False,
- use_checkpoint=False,
- use_scale_shift_norm=True,
- resblock_updown=True,
- use_fp16=False,
- use_new_attention_order=False,
- dims=2,
- num_groups=32,
- in_channels=1,
- out_channels=0, # automatically determine if 0
- bottleneck_attention=True,
- resample_2d=True,
- additive_skips=False,
- mode='default',
- use_freq=False,
- predict_xstart=False,
- )
- res.update(diffusion_defaults())
- return res
-
-
-def classifier_and_diffusion_defaults():
- res = classifier_defaults()
- res.update(diffusion_defaults())
- return res
-
-
-def create_model_and_diffusion(
- image_size,
- class_cond,
- learn_sigma,
- num_channels,
- num_res_blocks,
- channel_mult,
- num_heads,
- num_head_channels,
- num_heads_upsample,
- attention_resolutions,
- dropout,
- diffusion_steps,
- noise_schedule,
- timestep_respacing,
- use_kl,
- predict_xstart,
- rescale_timesteps,
- rescale_learned_sigmas,
- use_checkpoint,
- use_scale_shift_norm,
- resblock_updown,
- use_fp16,
- use_new_attention_order,
- dims,
- num_groups,
- in_channels,
- out_channels,
- bottleneck_attention,
- resample_2d,
- additive_skips,
- mode,
- use_freq,
- dataset,
-):
- model = create_model(
- image_size,
- num_channels,
- num_res_blocks,
- channel_mult=channel_mult,
- learn_sigma=learn_sigma,
- class_cond=class_cond,
- use_checkpoint=use_checkpoint,
- attention_resolutions=attention_resolutions,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- num_heads_upsample=num_heads_upsample,
- use_scale_shift_norm=use_scale_shift_norm,
- dropout=dropout,
- resblock_updown=resblock_updown,
- use_fp16=use_fp16,
- use_new_attention_order=use_new_attention_order,
- dims=dims,
- num_groups=num_groups,
- in_channels=in_channels,
- out_channels=out_channels,
- bottleneck_attention=bottleneck_attention,
- resample_2d=resample_2d,
- additive_skips=additive_skips,
- use_freq=use_freq,
- )
- diffusion = create_gaussian_diffusion(
- steps=diffusion_steps,
- learn_sigma=learn_sigma,
- noise_schedule=noise_schedule,
- use_kl=use_kl,
- predict_xstart=predict_xstart,
- rescale_timesteps=rescale_timesteps,
- rescale_learned_sigmas=rescale_learned_sigmas,
- timestep_respacing=timestep_respacing,
- mode=mode,
- )
- return model, diffusion
-
-
-def create_model(
- image_size,
- num_channels,
- num_res_blocks,
- channel_mult="",
- learn_sigma=False,
- class_cond=False,
- use_checkpoint=False,
- attention_resolutions="16",
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- dropout=0,
- resblock_updown=True,
- use_fp16=False,
- use_new_attention_order=False,
- num_groups=32,
- dims=2,
- in_channels=1,
- out_channels=0, # automatically determine if 0
- bottleneck_attention=True,
- resample_2d=True,
- additive_skips=False,
- use_freq=False,
-):
- if not channel_mult:
- if image_size == 512:
- channel_mult = (1, 1, 2, 2, 4, 4)
- elif image_size == 256:
- channel_mult = (1, 2, 2, 4, 4, 4)
- elif image_size == 128:
- channel_mult = (1, 2, 2, 4, 4)
- elif image_size == 64:
- channel_mult = (1, 2, 3, 4)
- else:
- raise ValueError(f"[MODEL] Unsupported image size: {image_size}")
- else:
- if isinstance(channel_mult, str):
- from ast import literal_eval
- channel_mult = literal_eval(channel_mult)
- elif isinstance(channel_mult, tuple): # do nothing
- pass
- else:
- raise ValueError(f"[MODEL] Value for {channel_mult=} not supported")
-
- attention_ds = []
- if attention_resolutions:
- for res in attention_resolutions.split(","):
- attention_ds.append(image_size // int(res))
- if out_channels == 0:
- out_channels = (2*in_channels if learn_sigma else in_channels)
-
- if not use_freq:
- return UNetModel(
- image_size=image_size,
- in_channels=in_channels,
- model_channels=num_channels,
- out_channels=out_channels * (1 if not learn_sigma else 2),
- num_res_blocks=num_res_blocks,
- attention_resolutions=tuple(attention_ds),
- dropout=dropout,
- channel_mult=channel_mult,
- num_classes=(NUM_CLASSES if class_cond else None),
- use_checkpoint=use_checkpoint,
- use_fp16=use_fp16,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- num_heads_upsample=num_heads_upsample,
- use_scale_shift_norm=use_scale_shift_norm,
- resblock_updown=resblock_updown,
- use_new_attention_order=use_new_attention_order,
- dims=dims,
- num_groups=num_groups,
- bottleneck_attention=bottleneck_attention,
- additive_skips=additive_skips,
- resample_2d=resample_2d,
- )
- else:
- return WavUNetModel(
- image_size=image_size,
- in_channels=in_channels,
- model_channels=num_channels,
- out_channels=out_channels * (1 if not learn_sigma else 2),
- num_res_blocks=num_res_blocks,
- attention_resolutions=tuple(attention_ds),
- dropout=dropout,
- channel_mult=channel_mult,
- num_classes=(NUM_CLASSES if class_cond else None),
- use_checkpoint=use_checkpoint,
- use_fp16=use_fp16,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- num_heads_upsample=num_heads_upsample,
- use_scale_shift_norm=use_scale_shift_norm,
- resblock_updown=resblock_updown,
- use_new_attention_order=use_new_attention_order,
- dims=dims,
- num_groups=num_groups,
- bottleneck_attention=bottleneck_attention,
- additive_skips=additive_skips,
- use_freq=use_freq,
- )
-
-
-def create_classifier_and_diffusion(
- image_size,
- classifier_use_fp16,
- classifier_width,
- classifier_depth,
- classifier_attention_resolutions,
- classifier_num_head_channels,
- classifier_use_scale_shift_norm,
- classifier_resblock_updown,
- classifier_pool,
- classifier_channel_mult,
- learn_sigma,
- diffusion_steps,
- noise_schedule,
- timestep_respacing,
- use_kl,
- predict_xstart,
- rescale_timesteps,
- rescale_learned_sigmas,
- dataset,
- dims,
- num_groups,
- in_channels,
-):
- print('timestepresp2', timestep_respacing)
- classifier = create_classifier(
- image_size,
- classifier_use_fp16,
- classifier_width,
- classifier_depth,
- classifier_attention_resolutions,
- classifier_use_scale_shift_norm,
- classifier_resblock_updown,
- classifier_pool,
- dataset,
- dims=dims,
- num_groups=num_groups,
- in_channels=in_channels,
- num_head_channels=classifier_num_head_channels,
- classifier_channel_mult=classifier_channel_mult,
- )
- diffusion = create_gaussian_diffusion(
- steps=diffusion_steps,
- learn_sigma=learn_sigma,
- noise_schedule=noise_schedule,
- use_kl=use_kl,
- predict_xstart=predict_xstart,
- rescale_timesteps=rescale_timesteps,
- rescale_learned_sigmas=rescale_learned_sigmas,
- timestep_respacing=timestep_respacing,
- )
- return classifier, diffusion
-
-
-def create_classifier(
- image_size,
- classifier_use_fp16,
- classifier_width,
- classifier_depth,
- classifier_attention_resolutions,
- classifier_use_scale_shift_norm,
- classifier_resblock_updown,
- classifier_pool,
- dataset,
- num_groups=32,
- dims=2,
- in_channels=1,
- num_head_channels=64,
- classifier_channel_mult="",
-):
- channel_mult = classifier_channel_mult
- if not channel_mult:
- if image_size == 256:
- channel_mult = (1, 1, 2, 2, 4, 4)
- elif image_size == 128:
- channel_mult = (1, 1, 2, 3, 4)
- elif image_size == 64:
- channel_mult = (1, 2, 3, 4)
- else:
- raise ValueError(f"unsupported image size: {image_size}")
- else:
- if isinstance(channel_mult, str):
- #channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
- from ast import literal_eval
- channel_mult = literal_eval(channel_mult)
- elif isinstance(channel_mult, tuple): # do nothing
- pass
- else:
- raise ValueError(f"value for {channel_mult=} not supported")
-
- attention_ds = []
- if classifier_attention_resolutions:
- for res in classifier_attention_resolutions.split(","):
- attention_ds.append(image_size // int(res))
-
- print('number_in_channels classifier', in_channels)
-
-
- return EncoderUNetModel(
- image_size=image_size,
- in_channels=in_channels,
- model_channels=classifier_width,
- out_channels=2,
- num_res_blocks=classifier_depth,
- attention_resolutions=tuple(attention_ds),
- channel_mult=channel_mult,
- use_fp16=classifier_use_fp16,
- num_head_channels=num_head_channels,
- use_scale_shift_norm=classifier_use_scale_shift_norm,
- resblock_updown=classifier_resblock_updown,
- pool=classifier_pool,
- num_groups=num_groups,
- dims=dims,
- )
-
-
-def sr_model_and_diffusion_defaults():
- res = model_and_diffusion_defaults()
- res["large_size"] = 256
- res["small_size"] = 64
- arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
- for k in res.copy().keys():
- if k not in arg_names:
- del res[k]
- return res
-
-
-def sr_create_model_and_diffusion(
- large_size,
- small_size,
- class_cond,
- learn_sigma,
- num_channels,
- num_res_blocks,
- num_heads,
- num_head_channels,
- num_heads_upsample,
- attention_resolutions,
- dropout,
- diffusion_steps,
- noise_schedule,
- timestep_respacing,
- use_kl,
- predict_xstart,
- rescale_timesteps,
- rescale_learned_sigmas,
- use_checkpoint,
- use_scale_shift_norm,
- resblock_updown,
- use_fp16,
-):
- print('timestepresp3', timestep_respacing)
- model = sr_create_model(
- large_size,
- small_size,
- num_channels,
- num_res_blocks,
- learn_sigma=learn_sigma,
- class_cond=class_cond,
- use_checkpoint=use_checkpoint,
- attention_resolutions=attention_resolutions,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- num_heads_upsample=num_heads_upsample,
- use_scale_shift_norm=use_scale_shift_norm,
- dropout=dropout,
- resblock_updown=resblock_updown,
- use_fp16=use_fp16,
- )
- diffusion = create_gaussian_diffusion(
- steps=diffusion_steps,
- learn_sigma=learn_sigma,
- noise_schedule=noise_schedule,
- use_kl=use_kl,
- predict_xstart=predict_xstart,
- rescale_timesteps=rescale_timesteps,
- rescale_learned_sigmas=rescale_learned_sigmas,
- timestep_respacing=timestep_respacing,
- )
- return model, diffusion
-
-
-def sr_create_model(
- large_size,
- small_size,
- num_channels,
- num_res_blocks,
- learn_sigma,
- class_cond,
- use_checkpoint,
- attention_resolutions,
- num_heads,
- num_head_channels,
- num_heads_upsample,
- use_scale_shift_norm,
- dropout,
- resblock_updown,
- use_fp16,
-):
- _ = small_size # hack to prevent unused variable
-
- if large_size == 512:
- channel_mult = (1, 1, 2, 2, 4, 4)
- elif large_size == 256:
- channel_mult = (1, 1, 2, 2, 4, 4)
- elif large_size == 64:
- channel_mult = (1, 2, 3, 4)
- else:
- raise ValueError(f"unsupported large size: {large_size}")
-
- attention_ds = []
- for res in attention_resolutions.split(","):
- attention_ds.append(large_size // int(res))
-
- return SuperResModel(
- image_size=large_size,
- in_channels=3,
- model_channels=num_channels,
- out_channels=(3 if not learn_sigma else 6),
- num_res_blocks=num_res_blocks,
- attention_resolutions=tuple(attention_ds),
- dropout=dropout,
- channel_mult=channel_mult,
- num_classes=(NUM_CLASSES if class_cond else None),
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- num_heads_upsample=num_heads_upsample,
- use_scale_shift_norm=use_scale_shift_norm,
- resblock_updown=resblock_updown,
- use_fp16=use_fp16,
- )
-
-
-def create_gaussian_diffusion(
- *,
- steps=1000,
- learn_sigma=False,
- sigma_small=False,
- noise_schedule="linear",
- use_kl=False,
- predict_xstart=False,
- rescale_timesteps=False,
- rescale_learned_sigmas=False,
- timestep_respacing="",
- mode='default',
-):
- betas = gd.get_named_beta_schedule(noise_schedule, steps)
- if use_kl:
- loss_type = gd.LossType.RESCALED_KL
- elif rescale_learned_sigmas:
- loss_type = gd.LossType.RESCALED_MSE
- else:
- loss_type = gd.LossType.MSE
-
- if not timestep_respacing:
- timestep_respacing = [steps]
-
- return SpacedDiffusion(
- use_timesteps=space_timesteps(steps, timestep_respacing),
- betas=betas,
- model_mean_type=(gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X),
- model_var_type=(
- (
- gd.ModelVarType.FIXED_LARGE
- if not sigma_small
- else gd.ModelVarType.FIXED_SMALL
- )
- if not learn_sigma
- else gd.ModelVarType.LEARNED_RANGE
- ),
- loss_type=loss_type,
- rescale_timesteps=rescale_timesteps,
- mode=mode,
- )
-
-
-def add_dict_to_argparser(parser, default_dict):
- for k, v in default_dict.items():
- v_type = type(v)
- if v is None:
- v_type = str
- elif isinstance(v, bool):
- v_type = str2bool
- parser.add_argument(f"--{k}", default=v, type=v_type)
-
-
-def args_to_dict(args, keys):
- return {k: getattr(args, k) for k in keys}
-
-
-def str2bool(v):
- """
- https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
- """
- if isinstance(v, bool):
- return v
- if v.lower() in ("yes", "true", "t", "y", "1"):
- return True
- elif v.lower() in ("no", "false", "f", "n", "0"):
- return False
- else:
- raise argparse.ArgumentTypeError("boolean value expected")
diff --git a/wdm-3d-initial/guided_diffusion/train_util.py b/wdm-3d-initial/guided_diffusion/train_util.py
deleted file mode 100644
index df2a0fd3a1ea8fb315e4d8a0780c554ffd665795..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/train_util.py
+++ /dev/null
@@ -1,376 +0,0 @@
-import copy
-import functools
-import os
-
-import blobfile as bf
-import torch as th
-import torch.distributed as dist
-import torch.utils.tensorboard
-from torch.optim import AdamW
-import torch.cuda.amp as amp
-
-import itertools
-
-from . import dist_util, logger
-from .resample import LossAwareSampler, UniformSampler
-from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
-
-INITIAL_LOG_LOSS_SCALE = 20.0
-
-def visualize(img):
- _min = img.min()
- _max = img.max()
- normalized_img = (img - _min)/ (_max - _min)
- return normalized_img
-
-class TrainLoop:
- def __init__(
- self,
- *,
- model,
- diffusion,
- data,
- batch_size,
- in_channels,
- image_size,
- microbatch,
- lr,
- ema_rate,
- log_interval,
- save_interval,
- resume_checkpoint,
- resume_step,
- use_fp16=False,
- fp16_scale_growth=1e-3,
- schedule_sampler=None,
- weight_decay=0.0,
- lr_anneal_steps=0,
- dataset='brats',
- summary_writer=None,
- mode='default',
- loss_level='image',
- ):
- self.summary_writer = summary_writer
- self.mode = mode
- self.model = model
- self.diffusion = diffusion
- self.datal = data
- self.dataset = dataset
- self.iterdatal = iter(data)
- self.batch_size = batch_size
- self.in_channels = in_channels
- self.image_size = image_size
- self.microbatch = microbatch if microbatch > 0 else batch_size
- self.lr = lr
- self.ema_rate = (
- [ema_rate]
- if isinstance(ema_rate, float)
- else [float(x) for x in ema_rate.split(",")]
- )
- self.log_interval = log_interval
- self.save_interval = save_interval
- self.resume_checkpoint = resume_checkpoint
- self.use_fp16 = use_fp16
- if self.use_fp16:
- self.grad_scaler = amp.GradScaler()
- else:
- self.grad_scaler = amp.GradScaler(enabled=False)
-
- self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
- self.weight_decay = weight_decay
- self.lr_anneal_steps = lr_anneal_steps
-
- self.dwt = DWT_3D('haar')
- self.idwt = IDWT_3D('haar')
-
- self.loss_level = loss_level
-
- self.step = 1
- self.resume_step = resume_step
- self.global_batch = self.batch_size * dist.get_world_size()
-
- self.sync_cuda = th.cuda.is_available()
-
- self._load_and_sync_parameters()
-
- self.opt = AdamW(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
- if self.resume_step:
- print("Resume Step: " + str(self.resume_step))
- self._load_optimizer_state()
-
- if not th.cuda.is_available():
- logger.warn(
- "Training requires CUDA. "
- )
-
- def _load_and_sync_parameters(self):
- resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
-
- if resume_checkpoint:
- print('resume model ...')
- self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
- if dist.get_rank() == 0:
- logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
- self.model.load_state_dict(
- dist_util.load_state_dict(
- resume_checkpoint, map_location=dist_util.dev()
- )
- )
-
- dist_util.sync_params(self.model.parameters())
-
-
- def _load_optimizer_state(self):
- main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
- opt_checkpoint = bf.join(
- bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
- )
- if bf.exists(opt_checkpoint):
- logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
- state_dict = dist_util.load_state_dict(
- opt_checkpoint, map_location=dist_util.dev()
- )
- self.opt.load_state_dict(state_dict)
- else:
- print('no optimizer checkpoint exists')
-
- def run_loop(self):
- import time
- t = time.time()
- while not self.lr_anneal_steps or self.step + self.resume_step < self.lr_anneal_steps:
- t_total = time.time() - t
- t = time.time()
- if self.dataset in ['brats', 'lidc-idri']:
- try:
- batch = next(self.iterdatal)
- cond = {}
- except StopIteration:
- self.iterdatal = iter(self.datal)
- batch = next(self.iterdatal)
- cond = {}
-
- batch = batch.to(dist_util.dev())
-
- t_fwd = time.time()
- t_load = t_fwd-t
-
- lossmse, sample, sample_idwt = self.run_step(batch, cond)
-
- t_fwd = time.time()-t_fwd
-
- names = ["LLL", "LLH", "LHL", "LHH", "HLL", "HLH", "HHL", "HHH"]
-
- if self.summary_writer is not None:
- self.summary_writer.add_scalar('time/load', t_load, global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('time/forward', t_fwd, global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('time/total', t_total, global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/MSE', lossmse.item(), global_step=self.step + self.resume_step)
-
- if self.step % 200 == 0:
- image_size = sample_idwt.size()[2]
- midplane = sample_idwt[0, 0, :, :, image_size // 2]
- self.summary_writer.add_image('sample/x_0', midplane.unsqueeze(0),
- global_step=self.step + self.resume_step)
-
- image_size = sample.size()[2]
- for ch in range(8):
- midplane = sample[0, ch, :, :, image_size // 2]
- self.summary_writer.add_image('sample/{}'.format(names[ch]), midplane.unsqueeze(0),
- global_step=self.step + self.resume_step)
-
- if self.step % self.log_interval == 0:
- logger.dumpkvs()
-
- if self.step % self.save_interval == 0:
- self.save()
- # Run for a finite amount of time in integration tests.
- if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
- return
- self.step += 1
-
- # Save the last checkpoint if it wasn't already saved.
- if (self.step - 1) % self.save_interval != 0:
- self.save()
-
- def run_step(self, batch, cond, label=None, info=dict()):
- lossmse, sample, sample_idwt = self.forward_backward(batch, cond, label)
-
- if self.use_fp16:
- self.grad_scaler.unscale_(self.opt) # check self.grad_scaler._per_optimizer_states
-
- # compute norms
- with torch.no_grad():
- param_max_norm = max([p.abs().max().item() for p in self.model.parameters()])
- grad_max_norm = max([p.grad.abs().max().item() for p in self.model.parameters()])
- info['norm/param_max'] = param_max_norm
- info['norm/grad_max'] = grad_max_norm
-
- if not torch.isfinite(lossmse): #infinite
- if not torch.isfinite(torch.tensor(param_max_norm)):
- logger.log(f"Model parameters contain non-finite value {param_max_norm}, entering breakpoint", level=logger.ERROR)
- breakpoint()
- else:
- logger.log(f"Model parameters are finite, but loss is not: {lossmse}"
- "\n -> update will be skipped in grad_scaler.step()", level=logger.WARN)
-
- if self.use_fp16:
- print("Use fp16 ...")
- self.grad_scaler.step(self.opt)
- self.grad_scaler.update()
- info['scale'] = self.grad_scaler.get_scale()
- else:
- self.opt.step()
- self._anneal_lr()
- self.log_step()
- return lossmse, sample, sample_idwt
-
- def forward_backward(self, batch, cond, label=None):
- for p in self.model.parameters(): # Zero out gradient
- p.grad = None
-
- for i in range(0, batch.shape[0], self.microbatch):
- micro = batch[i: i + self.microbatch].to(dist_util.dev())
-
- if label is not None:
- micro_label = label[i: i + self.microbatch].to(dist_util.dev())
- else:
- micro_label = None
-
- micro_cond = None
-
- last_batch = (i + self.microbatch) >= batch.shape[0]
- t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
-
- compute_losses = functools.partial(self.diffusion.training_losses,
- self.model,
- x_start=micro,
- t=t,
- model_kwargs=micro_cond,
- labels=micro_label,
- mode=self.mode,
- )
- losses1 = compute_losses()
-
- if isinstance(self.schedule_sampler, LossAwareSampler):
- self.schedule_sampler.update_with_local_losses(
- t, losses1["loss"].detach()
- )
-
- losses = losses1[0] # Loss value
- sample = losses1[1] # Denoised subbands at t=0
- sample_idwt = losses1[2] # Inverse wavelet transformed denoised subbands at t=0
-
- # Log wavelet level loss
- self.summary_writer.add_scalar('loss/mse_wav_lll', losses["mse_wav"][0].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_llh', losses["mse_wav"][1].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_lhl', losses["mse_wav"][2].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_lhh', losses["mse_wav"][3].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_hll', losses["mse_wav"][4].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_hlh', losses["mse_wav"][5].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_hhl', losses["mse_wav"][6].item(),
- global_step=self.step + self.resume_step)
- self.summary_writer.add_scalar('loss/mse_wav_hhh', losses["mse_wav"][7].item(),
- global_step=self.step + self.resume_step)
-
- weights = th.ones(len(losses["mse_wav"])).cuda() # Equally weight all wavelet channel losses
-
- loss = (losses["mse_wav"] * weights).mean()
- lossmse = loss.detach()
-
- log_loss_dict(self.diffusion, t, {k: v * weights for k, v in losses.items()})
-
- # perform some finiteness checks
- if not torch.isfinite(loss):
- logger.log(f"Encountered non-finite loss {loss}")
- if self.use_fp16:
- self.grad_scaler.scale(loss).backward()
- else:
- loss.backward()
-
- return lossmse.detach(), sample, sample_idwt
-
- def _anneal_lr(self):
- if not self.lr_anneal_steps:
- return
- frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
- lr = self.lr * (1 - frac_done)
- for param_group in self.opt.param_groups:
- param_group["lr"] = lr
-
- def log_step(self):
- logger.logkv("step", self.step + self.resume_step)
- logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
-
- def save(self):
- def save_checkpoint(rate, state_dict):
- if dist.get_rank() == 0:
- logger.log("Saving model...")
- if self.dataset == 'brats':
- filename = f"brats_{(self.step+self.resume_step):06d}.pt"
- elif self.dataset == 'lidc-idri':
- filename = f"lidc-idri_{(self.step+self.resume_step):06d}.pt"
- else:
- raise ValueError(f'dataset {self.dataset} not implemented')
-
- with bf.BlobFile(bf.join(get_blob_logdir(), 'checkpoints', filename), "wb") as f:
- th.save(state_dict, f)
-
- save_checkpoint(0, self.model.state_dict())
-
- if dist.get_rank() == 0:
- checkpoint_dir = os.path.join(logger.get_dir(), 'checkpoints')
- with bf.BlobFile(
- bf.join(checkpoint_dir, f"opt{(self.step+self.resume_step):06d}.pt"),
- "wb",
- ) as f:
- th.save(self.opt.state_dict(), f)
-
-
-def parse_resume_step_from_filename(filename):
- """
- Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
- checkpoint's number of steps.
- """
-
- split = os.path.basename(filename)
- split = split.split(".")[-2] # remove extension
- split = split.split("_")[-1] # remove possible underscores, keep only last word
- # extract trailing number
- reversed_split = []
- for c in reversed(split):
- if not c.isdigit():
- break
- reversed_split.append(c)
- split = ''.join(reversed(reversed_split))
- split = ''.join(c for c in split if c.isdigit()) # remove non-digits
- try:
- return int(split)
- except ValueError:
- return 0
-
-
-def get_blob_logdir():
- # You can change this to be a separate path to save checkpoints to
- # a blobstore or some external drive.
- return logger.get_dir()
-
-
-def find_resume_checkpoint():
- # On your infrastructure, you may want to override this to automatically
- # discover the latest checkpoint on your blob storage, etc.
- return None
-
-
-def log_loss_dict(diffusion, ts, losses):
- for key, values in losses.items():
- logger.logkv_mean(key, values.mean().item())
- # Log the quantiles (four quartiles, in particular).
- for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
- quartile = int(4 * sub_t / diffusion.num_timesteps)
- logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
diff --git a/wdm-3d-initial/guided_diffusion/unet.py b/wdm-3d-initial/guided_diffusion/unet.py
deleted file mode 100644
index 30b45c62a2b16c9b36af20a00496547268b7fa46..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/unet.py
+++ /dev/null
@@ -1,1044 +0,0 @@
-from abc import abstractmethod
-
-import math
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .nn import checkpoint, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding
-from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- else:
- x = layer(x)
- return x
-
-
-class Upsample(nn.Module):
- """
- An upsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- self.resample_2d = resample_2d
- if use_conv:
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.dims == 3 and self.resample_2d:
- x = F.interpolate(
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
- )
- else:
- x = F.interpolate(x, scale_factor=2, mode="nearest")
- if self.use_conv:
- x = self.conv(x)
- return x
-
-
-class Downsample(nn.Module):
- """
- A downsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- stride = (1, 2, 2) if dims == 3 and resample_2d else 2
- if use_conv:
- self.op = conv_nd(
- dims, self.channels, self.out_channels, 3, stride=stride, padding=1
- )
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- return self.op(x)
-
-
-class WaveletGatingDownsample(nn.Module):
- """
- A wavelet gated downsampling operation.
-
- This layer takes some input features and a timestep embedding vector as input and
- outputs the sum over gated wavelet coefficients, thus performing a downsampling.
-
- :param channels: channels in the inputs and outputs.
- :param temb_dim: timestep embedding dimension.
- """
-
- def __init__(self, channels, temb_dim):
- super().__init__()
- # Define wavelet transform
- self.dwt = DWT_3D('haar')
-
- # Define gating network
- self.pooling = nn.AdaptiveAvgPool3d(1)
- self.fnn = nn.Sequential(
- nn.Linear(channels + temb_dim, 128),
- nn.SiLU(),
- nn.Linear(128, 8),
- )
- self.act = nn.Sigmoid()
-
- def forward(self, x, temb):
- # Get gating values
- p = self.pooling(x).squeeze(-1).squeeze(-1).squeeze(-1) # Average pool over feature dimension
- c = th.cat((p, temb), dim=1) # Combine pooled input features and temb
- gating_values = self.act(self.fnn(c)) # Obtain gating values
-
- wavelet_subbands = self.dwt(x)
- scaled_wavelet_subbands = [band * gating.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
- for band, gating in zip(wavelet_subbands, th.split(gating_values, 1, dim=1))]
- return sum(scaled_wavelet_subbands)
-
-
-class WaveletGatingUpsample(nn.Module):
- """
- A wavelet gated upsampling operation.
-
- This layer takes some input features and a timestep embedding vector as input and
- outputs gated inverse wavelet transformed bands, thus performing upsampling.
-
- :param channels: channels in the inputs and outputs.
- :param temb_dim: timestep embedding dimension.
- """
-
- def __init__(self, channels, temb_dim):
- super().__init__()
- # Define inverse wavelet transform
- self.idwt = IDWT_3D('haar')
-
- # Define gating network
- self.pooling = nn.AdaptiveAvgPool3d(1)
- self.fnn = nn.Sequential(
- nn.Linear(channels + temb_dim, 128),
- nn.SiLU(),
- nn.Linear(128, 8),
- )
- self.act = nn.Sigmoid()
-
- # Define conv for channel expansion
- self.conv_exp = nn.Conv3d(channels, channels * 8, kernel_size=1)
-
- def forward(self, x, temb):
- # Get gating values
- p = self.pooling(x).squeeze(-1).squeeze(-1).squeeze(-1) # Average pool over feature dimension
- c = th.cat((p, temb), dim=1) # Combine pooled input features and temb
- gating_values = self.act(self.fnn(c)) # Obtain gating values
-
- # Perform a channel expansion and chunk into 8 wavelet subbands
- wavelet_subbands = self.conv_exp(x)
- wavelet_subbands = wavelet_subbands.chunk(8, dim=1)
-
- scaled_wavelet_subbands = [band * gating.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
- for band, gating in zip(wavelet_subbands, th.split(gating_values, 1, dim=1))]
-
- return self.idwt(*scaled_wavelet_subbands[:8])
-
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels.
-
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- :param use_wgupdown: if True, use wavelet gated up- and downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- out_channels=None,
- use_conv=False,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- up=False,
- down=False,
- num_groups=32,
- resample_2d=True,
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
- self.num_groups = num_groups
-
- self.in_layers = nn.Sequential(
- normalization(channels, self.num_groups),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- # when using "standard" upsampling
- self.h_upd = Upsample(channels, False, dims, resample_2d=resample_2d)
- self.x_upd = Upsample(channels, False, dims, resample_2d=resample_2d)
-
- elif down:
- # when using "standard" downsampling
- self.h_upd = Downsample(channels, False, dims, resample_2d=resample_2d)
- self.x_upd = Downsample(channels, False, dims, resample_2d=resample_2d)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
- self.out_layers = nn.Sequential(
- normalization(self.out_channels, self.num_groups),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
- ),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(
- dims, channels, self.out_channels, 3, padding=1
- )
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- def forward(self, x, emb):
- """
- Apply the block to a Tensor, conditioned on a timestep embedding.
-
- :param x: an [N x C x ...] Tensor of features.
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
- :return: an [N x C x ...] Tensor of outputs.
- """
- return checkpoint(
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
- )
-
- def _forward(self, x, emb):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
-
- emb_out = self.emb_layers(emb).type(h.dtype)
-
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
-
- if self.use_scale_shift_norm:
- print("You use scale-shift norm")
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
-
- else:
- h = h + emb_out
- h = self.out_layers(h)
-
- return self.skip_connection(x) + h
-
-
-class AttentionBlock(nn.Module):
- """
- An attention block that allows spatial positions to attend to each other.
-
- Originally ported from here, but adapted to the N-d case.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
- """
-
- def __init__(
- self,
- channels,
- num_heads=1,
- num_head_channels=-1,
- use_checkpoint=False,
- use_new_attention_order=False,
- num_groups=32,
- ):
- super().__init__()
- self.channels = channels
- if num_head_channels == -1:
- self.num_heads = num_heads
- else:
- assert (
- channels % num_head_channels == 0
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
- self.num_heads = channels // num_head_channels
- self.use_checkpoint = use_checkpoint
- self.norm = normalization(channels, num_groups)
- self.qkv = conv_nd(1, channels, channels * 3, 1)
- if use_new_attention_order:
- self.attention = QKVAttention(self.num_heads)
- else:
- # split heads before split qkv
- self.attention = QKVAttentionLegacy(self.num_heads)
-
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
- def forward(self, x):
- return checkpoint(self._forward, (x,), self.parameters(), True)
-
- def _forward(self, x):
- b, c, *spatial = x.shape
- x = x.reshape(b, c, -1)
- qkv = self.qkv(self.norm(x))
- h = self.attention(qkv)
- h = self.proj_out(h)
- return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
- """
- A counter for the `thop` package to count the operations in an
- attention operation.
- Meant to be used like:
- macs, params = thop.profile(
- model,
- inputs=(inputs, timestamps),
- custom_ops={QKVAttention: QKVAttention.count_flops},
- )
- """
- b, c, *spatial = y[0].shape
- num_spatial = int(np.prod(spatial))
- # We perform two matmuls with the same number of ops.
- # The first computes the weight matrix, the second computes
- # the combination of the value vectors.
- matmul_ops = 2 * b * (num_spatial ** 2) * c
- model.total_ops += th.DoubleTensor([matmul_ops])
-
-
-class QKVAttentionLegacy(nn.Module):
- """
- A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
-
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts", q * scale, k * scale
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v)
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
- """
- A module which performs QKV attention and splits in a different order.
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
-
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.chunk(3, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts",
- (q * scale).view(bs * self.n_heads, ch, length),
- (k * scale).view(bs * self.n_heads, ch, length),
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class UNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
-
- :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
- :param num_heads_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- :param use_new_attention_order: use a different attention pattern for potentially
- increased efficiency.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- num_classes=None,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- num_groups=32,
- bottleneck_attention=True,
- resample_2d=True,
- additive_skips=False,
- decoder_device_thresh=0,
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.image_size = image_size
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.num_classes = num_classes
- self.use_checkpoint = use_checkpoint
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
- self.num_groups = num_groups
- self.bottleneck_attention = bottleneck_attention
- self.devices = None
- self.decoder_device_thresh = decoder_device_thresh
- self.additive_skips = additive_skips
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- if self.num_classes is not None:
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
-
- ###############################################################
- # INPUT block
- ###############################################################
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- channels=ch,
- emb_channels=time_embed_dim,
- dropout=dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- num_groups=self.num_groups,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- )
- if resblock_updown
- else Downsample(
- ch,
- conv_resample,
- dims=dims,
- out_channels=out_ch,
- resample_2d=resample_2d,
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.input_block_chans_bk = input_block_chans[:]
- ################################################################
- # Middle block
- ################################################################
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- ),
- *([AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- num_groups=self.num_groups,
- )] if self.bottleneck_attention else [])
- ,
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- ),
- )
- self._feature_size += ch
-
- ####################################################################
- # OUTPUT BLOCKS
- ####################################################################
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(num_res_blocks + 1):
- ich = input_block_chans.pop()
- mid_ch = model_channels * mult if not self.additive_skips else (
- input_block_chans[-1] if input_block_chans else model_channels
- )
- layers = [
- ResBlock(
- ch + ich if not self.additive_skips else ch,
- time_embed_dim,
- dropout,
- out_channels=mid_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- )
- ]
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- mid_ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads_upsample,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- num_groups=self.num_groups,
- )
- )
- ch = mid_ch
- if level and i == num_res_blocks:
- out_ch = ch
- layers.append(
- ResBlock(
- mid_ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- )
- if resblock_updown
- else Upsample(
- mid_ch,
- conv_resample,
- dims=dims,
- out_channels=out_ch,
- resample_2d=resample_2d
- )
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- mid_ch = ch
-
- self.out = nn.Sequential(
- normalization(ch, self.num_groups),
- nn.SiLU(),
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
- )
-
- def to(self, *args, **kwargs):
- """
- we overwrite the to() method for the case where we
- distribute parts of our model to different devices
- """
- if isinstance(args[0], (list, tuple)) and len(args[0]) > 1:
- assert not kwargs and len(args) == 1
- # distribute to multiple devices
- self.devices = args[0]
- # move first half to first device, second half to second device
- self.input_blocks.to(self.devices[0])
- self.time_embed.to(self.devices[0])
- self.middle_block.to(self.devices[0]) # maybe devices 0
- for k, b in enumerate(self.output_blocks):
- if k < self.decoder_device_thresh:
- b.to(self.devices[0])
- else: # after threshold
- b.to(self.devices[1])
- self.out.to(self.devices[0])
- print(f"distributed UNet components to devices {self.devices}")
-
- else: # default behaviour
- super().to(*args, **kwargs)
- if self.devices is None: # if self.devices has not been set yet, read it from params
- p = next(self.parameters())
- self.devices = [p.device, p.device]
-
- def forward(self, x, timesteps, y=None):
- """
- Apply the model to an input batch.
-
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :param y: an [N] Tensor of labels, if class-conditional.
- :return: an [N x C x ...] Tensor of outputs.
- """
- assert (y is not None) == (
- self.num_classes is not None
- ), "must specify y if and only if the model is class-conditional"
- assert x.device == self.devices[0], f"{x.device=} does not match {self.devices[0]=}"
- assert timesteps.device == self.devices[0], f"{timesteps.device=} does not match {self.devices[0]=}"
-
- hs = []
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- if self.num_classes is not None:
- assert y.shape == (x.shape[0],)
- emb = emb + self.label_emb(y)
-
- h = x
- self.hs_shapes = []
- for module in self.input_blocks:
- h = module(h, emb)
- hs.append(h)
- self.hs_shapes.append(h.shape)
-
- h = self.middle_block(h, emb)
-
- for k, module in enumerate(self.output_blocks):
- new_hs = hs.pop()
- if k == self.decoder_device_thresh:
- h = h.to(self.devices[1])
- emb = emb.to(self.devices[1])
- if k >= self.decoder_device_thresh:
- new_hs = new_hs.to(self.devices[1])
-
- if self.additive_skips:
- h = (h + new_hs) / 2
- else:
- h = th.cat([h, new_hs], dim=1)
-
- h = module(h, emb)
- h = h.to(self.devices[0])
- return self.out(h)
-
-
-class SuperResModel(UNetModel):
- """
- A UNetModel that performs super-resolution.
-
- Expects an extra kwarg `low_res` to condition on a low-resolution image.
- """
-
- def __init__(self, image_size, in_channels, *args, **kwargs):
- super().__init__(image_size, in_channels * 2, *args, **kwargs)
-
- def forward(self, x, timesteps, low_res=None, **kwargs):
- _, _, new_height, new_width = x.shape
- upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
- x = th.cat([x, upsampled], dim=1)
- return super().forward(x, timesteps, **kwargs)
-
-
-class EncoderUNetModel(nn.Module):
- """
- The half UNet model with attention and timestep embedding.
-
- For usage, see UNet.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- pool="adaptive",
- num_groups=32,
- resample_2d=True,
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
- self.num_groups = num_groups
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- num_groups=self.num_groups,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch,
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- num_groups=self.num_groups,
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- ),
- )
- self._feature_size += ch
- self.pool = pool
- # global average pooling
- spatial_dims = (2, 3, 4, 5)[:dims]
- self.gap = lambda x: x.mean(dim=spatial_dims)
- self.cam_feature_maps = None
- print('pool', pool)
- if pool == "adaptive":
- self.out = nn.Sequential(
- normalization(ch, self.num_groups),
- nn.SiLU(),
- nn.AdaptiveAvgPool2d((1, 1)),
- zero_module(conv_nd(dims, ch, out_channels, 1)),
- nn.Flatten(),
- )
- elif pool == "attention":
- assert num_head_channels != -1
- self.out = nn.Sequential(
- normalization(ch, self.num_groups),
- nn.SiLU(),
- AttentionPool2d(
- (image_size // ds), ch, num_head_channels, out_channels
- ),
- )
- elif pool == "spatial":
- print('spatial')
- self.out = nn.Linear(256, self.out_channels)
- elif pool == "spatial_v2":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- normalization(2048, self.num_groups),
- nn.SiLU(),
- nn.Linear(2048, self.out_channels),
- )
- else:
- raise NotImplementedError(f"Unexpected {pool} pooling")
-
-
-
- def forward(self, x, timesteps):
- """
- Apply the model to an input batch.
-
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :return: an [N x K] Tensor of outputs.
- """
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- results = []
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = self.middle_block(h, emb)
-
-
- if self.pool.startswith("spatial"):
- self.cam_feature_maps = h
- h = self.gap(h)
- N = h.shape[0]
- h = h.reshape(N, -1)
- print('h1', h.shape)
- return self.out(h)
- else:
- h = h.type(x.dtype)
- self.cam_feature_maps = h
- return self.out(h)
diff --git a/wdm-3d-initial/guided_diffusion/wunet.py b/wdm-3d-initial/guided_diffusion/wunet.py
deleted file mode 100644
index 19cec981d803558df54f54aa6c891466a2b62bd0..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/guided_diffusion/wunet.py
+++ /dev/null
@@ -1,795 +0,0 @@
-from abc import abstractmethod
-
-import math
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .nn import checkpoint, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding
-from DWT_IDWT.DWT_IDWT_layer import DWT_3D, IDWT_3D
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- else:
- x = layer(x)
- return x
-
-
-class Upsample(nn.Module):
- """
- A wavelet upsampling layer with an optional convolution on the skip connections used to perform upsampling.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True, use_freq=True):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- self.resample_2d = resample_2d
-
- self.use_freq = use_freq
- self.idwt = IDWT_3D("haar")
-
- # Grouped convolution on 7 high frequency subbands (skip connections)
- if use_conv:
- self.conv = conv_nd(dims, self.channels * 7, self.out_channels * 7, 3, padding=1, groups=7)
-
- def forward(self, x):
- if isinstance(x, tuple):
- skip = x[1]
- x = x[0]
- assert x.shape[1] == self.channels
-
- if self.use_conv:
- skip = self.conv(th.cat(skip, dim=1) / 3.) * 3.
- skip = tuple(th.chunk(skip, 7, dim=1))
-
- if self.use_freq:
- x = self.idwt(3. * x, skip[0], skip[1], skip[2], skip[3], skip[4], skip[5], skip[6])
- else:
- if self.dims == 3 and self.resample_2d:
- x = F.interpolate(
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
- )
- else:
- x = F.interpolate(x, scale_factor=2, mode="nearest")
-
- return x, None
-
-
-class Downsample(nn.Module):
- """
- A wavelet downsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, resample_2d=True, use_freq=True):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
-
- self.use_freq = use_freq
- self.dwt = DWT_3D("haar")
-
- stride = (1, 2, 2) if dims == 3 and resample_2d else 2
-
- if use_conv:
- self.op = conv_nd(dims, self.channels, self.out_channels, 3, stride=stride, padding=1)
- elif self.use_freq:
- self.op = self.dwt
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- if self.use_freq:
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = self.op(x)
- x = (LLL / 3., (LLH, LHL, LHH, HLL, HLH, HHL, HHH))
- else:
- x = self.op(x)
- return x
-
-
-class WaveletDownsample(nn.Module):
- """
- Implements the wavelet downsampling blocks used to generate the input residuals.
-
- :param in_ch: number of input channels.
- :param out_ch: number of output channels (should match the feature size of the corresponding U-Net level)
- """
- def __init__(self, in_ch=None, out_ch=None):
- super().__init__()
- out_ch = out_ch if out_ch else in_ch
- self.in_ch = in_ch
- self.out_ch = out_ch
- self.conv = conv_nd(3, self.in_ch * 8, self.out_ch, 3, stride=1, padding=1)
- self.dwt = DWT_3D('haar')
-
- def forward(self, x):
- LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH = self.dwt(x)
- x = th.cat((LLL, LLH, LHL, LHH, HLL, HLH, HHL, HHH), dim=1) / 3.
- return self.conv(x)
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels via up- or downsampling.
-
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels, otherwise out_channels = channels.
- :param use_conv: if True and out_channels is specified, use a spatial convolution instead of a smaller 1x1
- convolution to change the channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- :param num_groups: if specified, the number of groups in the (adaptive) group normalization layers.
- :param use_freq: specifies if frequency aware up- or downsampling should be used.
- :param z_emb_dim: the dimension of the z-embedding.
-
- """
-
- def __init__(self, channels, emb_channels, dropout, out_channels=None, use_conv=True, use_scale_shift_norm=False,
- dims=2, use_checkpoint=False, up=False, down=False, num_groups=32, resample_2d=True, use_freq=False):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_scale_shift_norm = use_scale_shift_norm
- self.use_checkpoint = use_checkpoint
- self.up = up
- self.down = down
- self.num_groups = num_groups
- self.use_freq = use_freq
-
-
- # Define (adaptive) group normalization layers
- self.in_layers = nn.Sequential(
- normalization(channels, self.num_groups),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- # Check if up- or downsampling should be performed by this ResBlock
- self.updown = up or down
- if up:
- self.h_upd = Upsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
- self.x_upd = Upsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
- elif down:
- self.h_upd = Downsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
- self.x_upd = Downsample(channels, False, dims, resample_2d=resample_2d, use_freq=self.use_freq)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- # Define the timestep embedding layers
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels),
- )
-
- # Define output layers including (adaptive) group normalization
- self.out_layers = nn.Sequential(
- normalization(self.out_channels, self.num_groups),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)),
- )
-
- # Define skip branch
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
-
- def forward(self, x, temb):
- # Make sure to pipe skip connections
- if isinstance(x, tuple):
- hSkip = x[1]
- else:
- hSkip = None
-
- # Forward pass for ResBlock with up- or downsampling
- if self.updown:
- if self.up:
- x = x[0]
- h = self.in_layers(x)
-
- if self.up:
- h = (h, hSkip)
- x = (x, hSkip)
-
- h, hSkip = self.h_upd(h) # Updown in main branch (ResBlock)
- x, xSkip = self.x_upd(x) # Updown in skip-connection (ResBlock)
-
- # Forward pass for standard ResBlock
- else:
- if isinstance(x, tuple): # Check for skip connection tuple
- x = x[0]
- h = self.in_layers(x)
-
- # Common layers for both standard and updown ResBlocks
- emb_out = self.emb_layers(temb)
-
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
-
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
-
- else:
- h = h + emb_out # Add timestep embedding
- h = self.out_layers(h) # Forward pass out layers
-
- # Add skip connections
- out = self.skip_connection(x) + h
- out = out, hSkip
-
- return out
-
-
-
-class AttentionBlock(nn.Module):
- """
- An attention block that allows spatial positions to attend to each other.
-
- Originally ported from here, but adapted to the N-d case.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
- """
-
- def __init__(
- self,
- channels,
- num_heads=1,
- num_head_channels=-1,
- use_checkpoint=False,
- use_new_attention_order=False,
- num_groups=32,
- ):
- super().__init__()
- self.channels = channels
- if num_head_channels == -1:
- self.num_heads = num_heads
- else:
- assert (
- channels % num_head_channels == 0
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
- self.num_heads = channels // num_head_channels
- self.use_checkpoint = use_checkpoint
- self.norm = normalization(channels, num_groups)
- self.qkv = conv_nd(1, channels, channels * 3, 1)
- if use_new_attention_order:
- self.attention = QKVAttention(self.num_heads)
- else:
- # split heads before split qkv
- self.attention = QKVAttentionLegacy(self.num_heads)
-
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
- def forward(self, x):
- return checkpoint(self._forward, (x,), self.parameters(), True)
-
- def _forward(self, x):
- b, c, *spatial = x.shape
- x = x.reshape(b, c, -1)
- qkv = self.qkv(self.norm(x))
- h = self.attention(qkv)
- h = self.proj_out(h)
- return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
- """
- A counter for the `thop` package to count the operations in an
- attention operation.
- Meant to be used like:
- macs, params = thop.profile(
- model,
- inputs=(inputs, timestamps),
- custom_ops={QKVAttention: QKVAttention.count_flops},
- )
- """
- b, c, *spatial = y[0].shape
- num_spatial = int(np.prod(spatial))
- # We perform two matmuls with the same number of ops.
- # The first computes the weight matrix, the second computes
- # the combination of the value vectors.
- matmul_ops = 2 * b * (num_spatial ** 2) * c
- model.total_ops += th.DoubleTensor([matmul_ops])
-
-
-class QKVAttentionLegacy(nn.Module):
- """
- A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
-
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts", q * scale, k * scale
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v)
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
- """
- A module which performs QKV attention and splits in a different order.
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
-
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.chunk(3, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts",
- (q * scale).view(bs * self.n_heads, ch, length),
- (k * scale).view(bs * self.n_heads, ch, length),
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class WavUNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
-
- :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set,
- list, or tuple. For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
- :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- :param use_new_attention_order: use a different attention pattern for potentially increased efficiency.
- """
-
- def __init__(self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions,
- dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None,
- use_checkpoint=False, use_fp16=False, num_heads=1, num_head_channels=-1, num_heads_upsample=-1,
- use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, num_groups=32,
- bottleneck_attention=True, resample_2d=True, additive_skips=False, decoder_device_thresh=0,
- use_freq=False, progressive_input='residual'):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.image_size = image_size
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- # self.conv_resample = conv_resample
- self.num_classes = num_classes
- self.use_checkpoint = use_checkpoint
- # self.num_heads = num_heads
- # self.num_head_channels = num_head_channels
- # self.num_heads_upsample = num_heads_upsample
- self.num_groups = num_groups
- self.bottleneck_attention = bottleneck_attention
- self.devices = None
- self.decoder_device_thresh = decoder_device_thresh
- self.additive_skips = additive_skips
- self.use_freq = use_freq
- self.progressive_input = progressive_input
-
- #############################
- # TIMESTEP EMBEDDING layers #
- #############################
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim))
-
- ###############
- # INPUT block #
- ###############
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
-
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- input_pyramid_channels =in_channels
- ds = 1
-
- ######################################
- # DOWNWARD path - Feature extraction #
- ######################################
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks): # Adding Residual blocks
- layers = [
- ResBlock(
- channels=ch,
- emb_channels=time_embed_dim,
- dropout=dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- use_freq=self.use_freq,
- )
- ]
- ch = mult * model_channels # New input channels = channel_mult * base_channels
- # (first ResBlock performs channel adaption)
-
- if ds in attention_resolutions: # Adding Attention layers
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- num_groups=self.num_groups,
- )
- )
-
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
-
- # Adding downsampling operation
- out_ch = ch
- layers = []
- layers.append(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- use_freq=self.use_freq,
- )
- if resblock_updown
- else Downsample(
- ch,
- conv_resample,
- dims=dims,
- out_channels=out_ch,
- resample_2d=resample_2d,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
-
- layers = []
- if self.progressive_input == 'residual':
- layers.append(WaveletDownsample(in_ch=input_pyramid_channels, out_ch=out_ch))
- input_pyramid_channels = out_ch
-
- self.input_blocks.append(TimestepEmbedSequential(*layers))
-
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.input_block_chans_bk = input_block_chans[:]
-
- #########################
- # LATENT/ MIDDLE blocks #
- #########################
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- use_freq=self.use_freq,
- ),
- *([AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- num_groups=self.num_groups,
- )] if self.bottleneck_attention else [])
- ,
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- use_freq=self.use_freq,
- ),
- )
- self._feature_size += ch
-
- #################################
- # UPWARD path - feature mapping #
- #################################
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(num_res_blocks+1): # Adding Residual blocks
- if not i == num_res_blocks:
- mid_ch = model_channels * mult
-
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=mid_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- use_freq=self.use_freq,
- )
- ]
- if ds in attention_resolutions: # Adding Attention layers
- layers.append(
- AttentionBlock(
- mid_ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads_upsample,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- num_groups=self.num_groups,
- )
- )
- ch = mid_ch
- else: # Adding upsampling operation
- out_ch = ch
- layers.append(
- ResBlock(
- mid_ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- use_freq=self.use_freq,
- )
- if resblock_updown
- else Upsample(
- mid_ch,
- conv_resample,
- dims=dims,
- out_channels=out_ch,
- resample_2d=resample_2d
- )
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- mid_ch = ch
-
- ################
- # Out ResBlock #
- ################
- self.out_res = nn.ModuleList([])
- for i in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- num_groups=self.num_groups,
- resample_2d=resample_2d,
- use_freq=self.use_freq,
- )
- ]
- self.out_res.append(TimestepEmbedSequential(*layers))
-
- ################
- # OUTPUT block #
- ################
- self.out = nn.Sequential(
- normalization(ch, self.num_groups),
- nn.SiLU(),
- conv_nd(dims, model_channels, out_channels, 3, padding=1),
- )
-
- def to(self, *args, **kwargs):
- """
- we overwrite the to() method for the case where we
- distribute parts of our model to different devices
- """
- if isinstance(args[0], (list, tuple)) and len(args[0]) > 1:
- assert not kwargs and len(args) == 1
- # distribute to multiple devices
- self.devices = args[0]
- # move first half to first device, second half to second device
- self.input_blocks.to(self.devices[0])
- self.time_embed.to(self.devices[0])
- self.middle_block.to(self.devices[0]) # maybe devices 0
- for k, b in enumerate(self.output_blocks):
- if k < self.decoder_device_thresh:
- b.to(self.devices[0])
- else: # after threshold
- b.to(self.devices[1])
- self.out.to(self.devices[0])
- print(f"distributed UNet components to devices {self.devices}")
-
- else: # default behaviour
- super().to(*args, **kwargs)
- if self.devices is None: # if self.devices has not been set yet, read it from params
- p = next(self.parameters())
- self.devices = [p.device, p.device]
-
- def forward(self, x, timesteps):
- """
- Apply the model to an input batch.
-
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :param zemb: an [N] Tensor of labels, if class-conditional.
- :return: an [N x C x ...] Tensor of outputs.
- """
- hs = [] # Save skip-connections here
- input_pyramid = x
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) # Gen sinusoidal timestep embedding
- h = x
- self.hs_shapes = []
-
- for module in self.input_blocks:
- if not isinstance(module[0], WaveletDownsample):
- h = module(h, emb) # Run a downstream module
- skip = None
- if isinstance(h, tuple): # Check for skip features (tuple of high frequency subbands) and store in hs
- h, skip = h
- hs.append(skip)
- self.hs_shapes.append(h.shape)
- else:
- input_pyramid = module(input_pyramid, emb)
- input_pyramid = input_pyramid + h
- h = input_pyramid
-
- for module in self.middle_block:
- h = module(h, emb)
- if isinstance(h, tuple):
- h, skip = h
-
- for module in self.output_blocks:
- new_hs = hs.pop()
- if new_hs:
- skip = new_hs
-
- # Use additive skip connections
- if self.additive_skips:
- h = (h + new_hs) / np.sqrt(2)
-
- # Use frequency aware skip connections
- elif self.use_freq: # You usually want to use the frequency aware upsampling
- if isinstance(h, tuple): # Replace None with the stored skip features
- l = list(h)
- l[1] = skip
- h = tuple(l)
- else:
- h = (h, skip)
-
- # Use concatenation
- else:
- h = th.cat([h, new_hs], dim=1)
-
- h = module(h, emb) # Run an upstream module
-
- for module in self.out_res:
- h = module(h, emb)
-
- h, _ = h
- return self.out(h)
diff --git a/wdm-3d-initial/run.sh b/wdm-3d-initial/run.sh
deleted file mode 100644
index dbd098af6a1191275d2c214ae043beadaac06b0a..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/run.sh
+++ /dev/null
@@ -1,124 +0,0 @@
-# general settings
-GPU=0; # gpu to use
-SEED=42; # randomness seed for sampling
-CHANNELS=64; # number of model base channels (we use 64 for all experiments)
-MODE='train'; # train vs sample
-DATASET='brats'; # brats or lidc-idri
-MODEL='ours_unet_128'; # 'ours_unet_256', 'ours_wnet_128', 'ours_wnet_256'
-
-# settings for sampling/inference
-ITERATIONS=0; # training iteration (as a multiple of 1k) checkpoint to use for sampling
-SAMPLING_STEPS=0; # number of steps for accelerated sampling, 0 for the default 1000
-RUN_DIR=""; # tensorboard dir to be set for the evaluation
-
-# detailed settings (no need to change for reproducing)
-if [[ $MODEL == 'ours_unet_128' ]]; then
- echo "MODEL: WDM (U-Net) 128 x 128 x 128";
- CHANNEL_MULT=1,2,2,4,4;
- IMAGE_SIZE=128;
- ADDITIVE_SKIP=True;
- USE_FREQ=False;
- BATCH_SIZE=10;
-elif [[ $MODEL == 'ours_unet_256' ]]; then
- echo "MODEL: WDM (U-Net) 256 x 256 x 256";
- CHANNEL_MULT=1,2,2,4,4,4;
- IMAGE_SIZE=256;
- ADDITIVE_SKIP=True;
- USE_FREQ=False;
- BATCH_SIZE=1;
-elif [[ $MODEL == 'ours_wnet_128' ]]; then
- echo "MODEL: WDM (WavU-Net) 128 x 128 x 128";
- CHANNEL_MULT=1,2,2,4,4;
- IMAGE_SIZE=128;
- ADDITIVE_SKIP=False;
- USE_FREQ=True;
- BATCH_SIZE=10;
-elif [[ $MODEL == 'ours_wnet_256' ]]; then
- echo "MODEL: WDM (WavU-Net) 256 x 256 x 256";
- CHANNEL_MULT=1,2,2,4,4,4;
- IMAGE_SIZE=256;
- ADDITIVE_SKIP=False;
- USE_FREQ=True;
- BATCH_SIZE=1;
-else
- echo "MODEL TYPE NOT FOUND -> Check the supported configurations again";
-fi
-
-# some information and overwriting batch size for sampling
-# (overwrite in case you want to sample with a higher batch size)
-# no need to change for reproducing
-if [[ $MODE == 'sample' ]]; then
- echo "MODE: sample"
- BATCH_SIZE=1;
-elif [[ $MODE == 'train' ]]; then
- if [[ $DATASET == 'brats' ]]; then
- echo "MODE: training";
- echo "DATASET: BRATS";
- DATA_DIR=~/wdm-3d/data/BRATS/;
- elif [[ $DATASET == 'lidc-idri' ]]; then
- echo "MODE: training";
- echo "Dataset: LIDC-IDRI";
- DATA_DIR=~/wdm-3d/data/LIDC-IDRI/;
- else
- echo "DATASET NOT FOUND -> Check the supported datasets again";
- fi
-fi
-
-COMMON="
---dataset=${DATASET}
---num_channels=${CHANNELS}
---class_cond=False
---num_res_blocks=2
---num_heads=1
---learn_sigma=False
---use_scale_shift_norm=False
---attention_resolutions=
---channel_mult=${CHANNEL_MULT}
---diffusion_steps=1000
---noise_schedule=linear
---rescale_learned_sigmas=False
---rescale_timesteps=False
---dims=3
---batch_size=${BATCH_SIZE}
---num_groups=32
---in_channels=8
---out_channels=8
---bottleneck_attention=False
---resample_2d=False
---renormalize=True
---additive_skips=${ADDITIVE_SKIP}
---use_freq=${USE_FREQ}
---predict_xstart=True
-"
-TRAIN="
---data_dir=${DATA_DIR}
---resume_checkpoint=
---resume_step=0
---image_size=${IMAGE_SIZE}
---use_fp16=False
---lr=1e-5
---save_interval=100000
---num_workers=24
---devices=${GPU}
-"
-SAMPLE="
---data_dir=${DATA_DIR}
---data_mode=${DATA_MODE}
---seed=${SEED}
---image_size=${IMAGE_SIZE}
---use_fp16=False
---model_path=./${RUN_DIR}/checkpoints/${DATASET}_${ITERATIONS}000.pt
---devices=${GPU}
---output_dir=./results/${RUN_DIR}/${DATASET}_${MODEL}_${ITERATIONS}000/
---num_samples=1000
---use_ddim=False
---sampling_steps=${SAMPLING_STEPS}
---clip_denoised=True
-"
-
-# run the python scripts
-if [[ $MODE == 'train' ]]; then
- python scripts/generation_train.py $TRAIN $COMMON;
-else
- python scripts/generation_sample.py $SAMPLE $COMMON;
-fi
diff --git a/wdm-3d-initial/scripts/.ipynb_checkpoints/generation_train-checkpoint.py b/wdm-3d-initial/scripts/.ipynb_checkpoints/generation_train-checkpoint.py
deleted file mode 100644
index 8ddb1a05402ceb75b93e8f3bed97226b580defba..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/scripts/.ipynb_checkpoints/generation_train-checkpoint.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-A script for training a diffusion model to unconditional image generation.
-"""
-
-import argparse
-import numpy as np
-import random
-import sys
-import torch as th
-
-sys.path.append(".")
-sys.path.append("..")
-
-from guided_diffusion import (dist_util,
- logger)
-from guided_diffusion.bratsloader import BRATSVolumes
-from guided_diffusion.lidcloader import LIDCVolumes
-from guided_diffusion.inpaintloader import InpaintVolumes
-from guided_diffusion.resample import create_named_schedule_sampler
-from guided_diffusion.script_util import (model_and_diffusion_defaults,
- create_model_and_diffusion,
- args_to_dict,
- add_dict_to_argparser)
-from guided_diffusion.train_util import TrainLoop
-from torch.utils.tensorboard import SummaryWriter
-
-
-def main():
- args = create_argparser().parse_args()
- seed = args.seed
- th.manual_seed(seed)
- np.random.seed(seed)
- random.seed(seed)
-
- summary_writer = None
- if args.use_tensorboard:
- logdir = None
- if args.tensorboard_path:
- logdir = args.tensorboard_path
- summary_writer = SummaryWriter(log_dir=logdir)
- summary_writer.add_text(
- 'config',
- '\n'.join([f'--{k}={repr(v)}
' for k, v in vars(args).items()])
- )
- logger.configure(dir=summary_writer.get_logdir())
- else:
- logger.configure()
-
- dist_util.setup_dist(devices=args.devices)
-
- logger.log("Creating model and diffusion...")
- arguments = args_to_dict(args, model_and_diffusion_defaults().keys())
- model, diffusion = create_model_and_diffusion(**arguments)
-
- # logger.log("Number of trainable parameters: {}".format(np.array([np.array(p.shape).prod() for p in model.parameters()]).sum()))
- model.to(dist_util.dev([0, 1]) if len(args.devices) > 1 else dist_util.dev()) # allow for 2 devices
- schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion, maxt=1000)
-
- if args.dataset == 'brats':
- assert args.image_size in [128, 256], "We currently just support image sizes: 128, 256"
- ds = BRATSVolumes(args.data_dir, test_flag=False,
- normalize=(lambda x: 2*x - 1) if args.renormalize else None,
- mode='train',
- img_size=args.image_size)
-
- elif args.dataset == 'lidc-idri':
- assert args.image_size in [128, 256], "We currently just support image sizes: 128, 256"
- ds = LIDCVolumes(args.data_dir, test_flag=False,
- normalize=(lambda x: 2*x - 1) if args.renormalize else None,
- mode='train',
- img_size=args.image_size)
-
- elif args.dataset == 'inpaint':
- assert args.image_size in [128, 256], "We currently just support image sizes: 128, 256"
- ds = InpaintVolumes(args.data_dir,
- normalize=(lambda x: 2*x - 1) if args.renormalize else None,
- mode='train',
- img_size=args.image_size)
-
- datal = th.utils.data.DataLoader(ds,
- batch_size=args.batch_size,
- num_workers=args.num_workers,
- shuffle=True,
- )
-
- logger.log("Start training...")
- TrainLoop(
- model=model,
- diffusion=diffusion,
- data=datal,
- batch_size=args.batch_size,
- in_channels=args.in_channels,
- image_size=args.image_size,
- microbatch=args.microbatch,
- lr=args.lr,
- ema_rate=args.ema_rate,
- log_interval=args.log_interval,
- save_interval=args.save_interval,
- resume_checkpoint=args.resume_checkpoint,
- resume_step=args.resume_step,
- use_fp16=args.use_fp16,
- fp16_scale_growth=args.fp16_scale_growth,
- schedule_sampler=schedule_sampler,
- weight_decay=args.weight_decay,
- lr_anneal_steps=args.lr_anneal_steps,
- dataset=args.dataset,
- summary_writer=summary_writer,
- mode='default',
- ).run_loop()
-
-
-def create_argparser():
- defaults = dict(
- seed=0,
- data_dir="",
- schedule_sampler="uniform",
- lr=1e-4,
- weight_decay=0.0,
- lr_anneal_steps=0,
- batch_size=1,
- microbatch=-1,
- ema_rate="0.9999",
- log_interval=100,
- save_interval=5000,
- resume_checkpoint='',
- resume_step=0,
- use_fp16=False,
- fp16_scale_growth=1e-3,
- dataset='inpaint',
- use_tensorboard=True,
- tensorboard_path='', # set path to existing logdir for resuming
- devices=[0],
- dims=3,
- learn_sigma=False,
- num_groups=32,
- channel_mult="1,2,2,4,4",
- in_channels=8,
- out_channels=8,
- bottleneck_attention=False,
- num_workers=0,
- mode='default',
- renormalize=True,
- additive_skips=False,
- use_freq=False,
- )
- defaults.update(model_and_diffusion_defaults())
- parser = argparse.ArgumentParser()
- add_dict_to_argparser(parser, defaults)
- return parser
-
-
-if __name__ == "__main__":
- main()
diff --git a/wdm-3d-initial/scripts/generation_sample.py b/wdm-3d-initial/scripts/generation_sample.py
deleted file mode 100644
index dd2cd514fe2cbab721ddf9fae513cf22e9167942..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/scripts/generation_sample.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""
-A script for sampling from a diffusion model for unconditional image generation.
-"""
-
-import argparse
-import nibabel as nib
-import numpy as np
-import os
-import pathlib
-import random
-import sys
-import torch as th
-
-sys.path.append(".")
-
-from guided_diffusion import (dist_util,
- logger)
-from guided_diffusion.script_util import (model_and_diffusion_defaults,
- create_model_and_diffusion,
- add_dict_to_argparser,
- args_to_dict,
- )
-from DWT_IDWT.DWT_IDWT_layer import IDWT_3D
-
-
-def visualize(img):
- _min = img.min()
- _max = img.max()
- normalized_img = (img - _min)/ (_max - _min)
- return normalized_img
-
-
-def dice_score(pred, targs):
- pred = (pred>0).float()
- return 2. * (pred*targs).sum() / (pred+targs).sum()
-
-
-def main():
- args = create_argparser().parse_args()
- seed = args.seed
- dist_util.setup_dist(devices=args.devices)
- logger.configure()
-
- logger.log("Creating model and diffusion...")
- model, diffusion = create_model_and_diffusion(
- **args_to_dict(args, model_and_diffusion_defaults().keys())
- )
- logger.log("Load model from: {}".format(args.model_path))
- model.load_state_dict(dist_util.load_state_dict(args.model_path, map_location="cpu"))
- model.to(dist_util.dev([0, 1]) if len(args.devices) > 1 else dist_util.dev()) # allow for 2 devices
-
- if args.use_fp16:
- raise ValueError("fp16 currently not implemented")
-
- model.eval()
- idwt = IDWT_3D("haar")
-
- for ind in range(args.num_samples // args.batch_size):
- th.manual_seed(seed)
- np.random.seed(seed)
- random.seed(seed)
- # print(f"Reseeded (in for loop) to {seed}")
-
- seed += 1
-
- img = th.randn(args.batch_size, # Batch size
- 8, # 8 wavelet coefficients
- args.image_size//2, # Half spatial resolution (D)
- args.image_size//2, # Half spatial resolution (H)
- args.image_size//2, # Half spatial resolution (W)
- ).to(dist_util.dev())
-
- model_kwargs = {}
-
- sample_fn = diffusion.p_sample_loop
-
- sample = sample_fn(model=model,
- shape=img.shape,
- noise=img,
- clip_denoised=args.clip_denoised,
- model_kwargs=model_kwargs,
- )
-
- B, _, D, H, W = sample.size()
-
- sample = idwt(sample[:, 0, :, :, :].view(B, 1, D, H, W) * 3.,
- sample[:, 1, :, :, :].view(B, 1, D, H, W),
- sample[:, 2, :, :, :].view(B, 1, D, H, W),
- sample[:, 3, :, :, :].view(B, 1, D, H, W),
- sample[:, 4, :, :, :].view(B, 1, D, H, W),
- sample[:, 5, :, :, :].view(B, 1, D, H, W),
- sample[:, 6, :, :, :].view(B, 1, D, H, W),
- sample[:, 7, :, :, :].view(B, 1, D, H, W))
-
- sample = (sample + 1) / 2.
-
-
- if len(sample.shape) == 5:
- sample = sample.squeeze(dim=1) # don't squeeze batch dimension for bs 1
-
- pathlib.Path(args.output_dir).mkdir(parents=True, exist_ok=True)
- for i in range(sample.shape[0]):
- output_name = os.path.join(args.output_dir, f'sample_{ind}_{i}.nii.gz')
- img = nib.Nifti1Image(sample.detach().cpu().numpy()[i, :, :, :], np.eye(4))
- nib.save(img=img, filename=output_name)
- print(f'Saved to {output_name}')
-
-
-def create_argparser():
- defaults = dict(
- seed=0,
- data_dir="",
- data_mode='validation',
- clip_denoised=True,
- num_samples=1,
- batch_size=1,
- use_ddim=False,
- class_cond=False,
- sampling_steps=0,
- model_path="",
- devices=[0],
- output_dir='./results',
- mode='default',
- renormalize=False,
- image_size=256,
- half_res_crop=False,
- concat_coords=False, # if true, add 3 (for 3d) or 2 (for 2d) to in_channels
- )
- defaults.update({k:v for k, v in model_and_diffusion_defaults().items() if k not in defaults})
- parser = argparse.ArgumentParser()
- add_dict_to_argparser(parser, defaults)
- return parser
-
-
-if __name__ == "__main__":
- main()
diff --git a/wdm-3d-initial/scripts/generation_train.py b/wdm-3d-initial/scripts/generation_train.py
deleted file mode 100644
index 8ddb1a05402ceb75b93e8f3bed97226b580defba..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/scripts/generation_train.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-A script for training a diffusion model to unconditional image generation.
-"""
-
-import argparse
-import numpy as np
-import random
-import sys
-import torch as th
-
-sys.path.append(".")
-sys.path.append("..")
-
-from guided_diffusion import (dist_util,
- logger)
-from guided_diffusion.bratsloader import BRATSVolumes
-from guided_diffusion.lidcloader import LIDCVolumes
-from guided_diffusion.inpaintloader import InpaintVolumes
-from guided_diffusion.resample import create_named_schedule_sampler
-from guided_diffusion.script_util import (model_and_diffusion_defaults,
- create_model_and_diffusion,
- args_to_dict,
- add_dict_to_argparser)
-from guided_diffusion.train_util import TrainLoop
-from torch.utils.tensorboard import SummaryWriter
-
-
-def main():
- args = create_argparser().parse_args()
- seed = args.seed
- th.manual_seed(seed)
- np.random.seed(seed)
- random.seed(seed)
-
- summary_writer = None
- if args.use_tensorboard:
- logdir = None
- if args.tensorboard_path:
- logdir = args.tensorboard_path
- summary_writer = SummaryWriter(log_dir=logdir)
- summary_writer.add_text(
- 'config',
- '\n'.join([f'--{k}={repr(v)}' for k, v in vars(args).items()])
- )
- logger.configure(dir=summary_writer.get_logdir())
- else:
- logger.configure()
-
- dist_util.setup_dist(devices=args.devices)
-
- logger.log("Creating model and diffusion...")
- arguments = args_to_dict(args, model_and_diffusion_defaults().keys())
- model, diffusion = create_model_and_diffusion(**arguments)
-
- # logger.log("Number of trainable parameters: {}".format(np.array([np.array(p.shape).prod() for p in model.parameters()]).sum()))
- model.to(dist_util.dev([0, 1]) if len(args.devices) > 1 else dist_util.dev()) # allow for 2 devices
- schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion, maxt=1000)
-
- if args.dataset == 'brats':
- assert args.image_size in [128, 256], "We currently just support image sizes: 128, 256"
- ds = BRATSVolumes(args.data_dir, test_flag=False,
- normalize=(lambda x: 2*x - 1) if args.renormalize else None,
- mode='train',
- img_size=args.image_size)
-
- elif args.dataset == 'lidc-idri':
- assert args.image_size in [128, 256], "We currently just support image sizes: 128, 256"
- ds = LIDCVolumes(args.data_dir, test_flag=False,
- normalize=(lambda x: 2*x - 1) if args.renormalize else None,
- mode='train',
- img_size=args.image_size)
-
- elif args.dataset == 'inpaint':
- assert args.image_size in [128, 256], "We currently just support image sizes: 128, 256"
- ds = InpaintVolumes(args.data_dir,
- normalize=(lambda x: 2*x - 1) if args.renormalize else None,
- mode='train',
- img_size=args.image_size)
-
- datal = th.utils.data.DataLoader(ds,
- batch_size=args.batch_size,
- num_workers=args.num_workers,
- shuffle=True,
- )
-
- logger.log("Start training...")
- TrainLoop(
- model=model,
- diffusion=diffusion,
- data=datal,
- batch_size=args.batch_size,
- in_channels=args.in_channels,
- image_size=args.image_size,
- microbatch=args.microbatch,
- lr=args.lr,
- ema_rate=args.ema_rate,
- log_interval=args.log_interval,
- save_interval=args.save_interval,
- resume_checkpoint=args.resume_checkpoint,
- resume_step=args.resume_step,
- use_fp16=args.use_fp16,
- fp16_scale_growth=args.fp16_scale_growth,
- schedule_sampler=schedule_sampler,
- weight_decay=args.weight_decay,
- lr_anneal_steps=args.lr_anneal_steps,
- dataset=args.dataset,
- summary_writer=summary_writer,
- mode='default',
- ).run_loop()
-
-
-def create_argparser():
- defaults = dict(
- seed=0,
- data_dir="",
- schedule_sampler="uniform",
- lr=1e-4,
- weight_decay=0.0,
- lr_anneal_steps=0,
- batch_size=1,
- microbatch=-1,
- ema_rate="0.9999",
- log_interval=100,
- save_interval=5000,
- resume_checkpoint='',
- resume_step=0,
- use_fp16=False,
- fp16_scale_growth=1e-3,
- dataset='inpaint',
- use_tensorboard=True,
- tensorboard_path='', # set path to existing logdir for resuming
- devices=[0],
- dims=3,
- learn_sigma=False,
- num_groups=32,
- channel_mult="1,2,2,4,4",
- in_channels=8,
- out_channels=8,
- bottleneck_attention=False,
- num_workers=0,
- mode='default',
- renormalize=True,
- additive_skips=False,
- use_freq=False,
- )
- defaults.update(model_and_diffusion_defaults())
- parser = argparse.ArgumentParser()
- add_dict_to_argparser(parser, defaults)
- return parser
-
-
-if __name__ == "__main__":
- main()
diff --git a/wdm-3d-initial/utils/preproc_lidc-idri.py b/wdm-3d-initial/utils/preproc_lidc-idri.py
deleted file mode 100644
index 690ed07f66a1bfba337e09f47a5074ccf6edbfce..0000000000000000000000000000000000000000
--- a/wdm-3d-initial/utils/preproc_lidc-idri.py
+++ /dev/null
@@ -1,92 +0,0 @@
-"""
-Script for preprocessing the LIDC-IDRI dataset.
-"""
-import argparse
-import os
-import shutil
-import dicom2nifti
-import nibabel as nib
-import numpy as np
-from scipy.ndimage import zoom
-
-
-def preprocess_nifti(input_path, output_path):
- # Load the Nifti image
- print('Process image: {}'.format(input_path))
- img = nib.load(input_path)
-
- # Get the current voxel sizes
- voxel_sizes = img.header.get_zooms()
-
- # Calculate the target voxel size (1mm x 1mm x 1mm)
- target_voxel_size = (1.0, 1.0, 1.0)
-
- # Calculate the resampling factor
- zoom_factors = [current / target for target, current in zip(target_voxel_size, voxel_sizes)]
-
- # Resample the image
- print("[1] Resample the image ...")
- resampled_data = zoom(img.get_fdata(), zoom_factors, order=3, mode='nearest')
-
- print("[2] Center crop the image ...")
- crop_size = (256, 256, 256)
- depth, height, width = resampled_data.shape
-
- d_start = (depth - crop_size[0]) // 2
- h_start = (height - crop_size[1]) // 2
- w_start = (width - crop_size[2]) // 2
- cropped_arr = resampled_data[d_start:d_start + crop_size[0], h_start:h_start + crop_size[1], w_start:w_start + crop_size[2]]
-
- print("[3] Clip all values below -1000 ...")
- cropped_arr[cropped_arr < -1000] = -1000
-
- print("[4] Clip the upper quantile (0.999) to remove outliers ...")
- out_clipped = np.clip(cropped_arr, -1000, np.quantile(cropped_arr, 0.999))
-
- print("[5] Normalize the image ...")
- out_normalized = (out_clipped - np.min(out_clipped)) / (np.max(out_clipped) - np.min(out_clipped))
-
- assert out_normalized.shape == (256, 256, 256), "The output shape should be (320,320,320)"
-
- print("[6] FINAL REPORT: Min value: {}, Max value: {}, Shape: {}".format(out_normalized.min(),
- out_normalized.max(),
- out_normalized.shape))
- print("-------------------------------------------------------------------------------")
- # Save the resampled image
- resampled_img = nib.Nifti1Image(out_normalized, np.eye(4))
- nib.save(resampled_img, output_path)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument('--dicom_dir', type=str, required=True,
- help='Directory containing the original dicom data')
- parser.add_argument('--nifti_dir', type=str, required=True,
- help='Directory to store the processed nifti files')
- parser.add_argument('--delete_unprocessed', type=eval, default=True,
- help='Set true to delete the unprocessed nifti files')
- args = parser.parse_args()
-
- # Convert DICOM to nifti
- for patient in os.listdir(args.dicom_dir):
- print('Convert {} to nifti'.format(patient))
- if not os.path.exists(os.path.join(args.nifti_dir, patient)):
- os.makedirs(os.path.join(args.nifti_dir, patient))
- dicom2nifti.convert_directory(os.path.join(args.dicom_dir, patient),
- os.path.join(args.nifti_dir, patient))
- shutil.rmtree(os.path.join(args.dicom_dir, patient))
-
- # Preprocess nifti files
- for root, dirs, files in os.walk(args.nifti_dir):
- for file in files:
- try:
- preprocess_nifti(os.path.join(root, file), os.path.join(root, 'processed.nii.gz'))
- except:
- print("Error occurred for file: {}".format(file))
-
- # Delete unprocessed nifti files
- if args.delete_unprocessed:
- for root, dirs, files in os.walk(args.nifti_dir):
- for file in files:
- if not file == 'processed.nii.gz':
- os.remove(os.path.join(root, file))