repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
attention-is-all-you-need-pytorch | attention-is-all-you-need-pytorch-master/transformer/SubLayers.py | ''' Define the sublayers in encoder/decoder layer '''
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from transformer.Modules import ScaledDotProductAttention
__author__ = "Yu-Hsiang Huang"
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention: joint Q/K/V projections, per-head scaled
    dot-product attention, output projection, dropout, residual + LayerNorm. '''
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # One linear layer projects all heads at once; heads are split in forward().
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
        self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
    def forward(self, q, k, v, mask=None):
        """Apply multi-head attention; returns (output, attention weights)."""
        sz_b = q.size(0)
        len_q = q.size(1)
        residual = q

        def split_heads(x, proj, depth):
            # b x len x d_model -> b x n_head x len x depth
            return proj(x).view(sz_b, x.size(1), self.n_head, depth).transpose(1, 2)

        q = split_heads(q, self.w_qs, self.d_k)
        k = split_heads(k, self.w_ks, self.d_k)
        v = split_heads(v, self.w_vs, self.d_v)
        if mask is not None:
            mask = mask.unsqueeze(1)  # broadcast the mask over the head axis
        out, attn = self.attention(q, k, v, mask=mask)
        # Merge heads: b x n x lq x dv -> b x lq x (n*dv)
        out = out.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
        out = self.dropout(self.fc(out))
        out = self.layer_norm(out + residual)
        return out, attn
class PositionwiseFeedForward(nn.Module):
    ''' A two-feed-forward-layer module '''
    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_in, d_hid)  # applied independently at every position
        self.w_2 = nn.Linear(d_hid, d_in)  # applied independently at every position
        self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        """Position-wise FFN with residual connection and post-layer-norm."""
        residual = x
        hidden = F.relu(self.w_1(x))
        out = self.dropout(self.w_2(hidden))
        return self.layer_norm(out + residual)
| 2,606 | 30.409639 | 96 | py |
attention-is-all-you-need-pytorch | attention-is-all-you-need-pytorch-master/transformer/Modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
__author__ = "Yu-Hsiang Huang"
class ScaledDotProductAttention(nn.Module):
    ''' Scaled Dot-Product Attention: softmax(q k^T / temperature) v '''
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
    def forward(self, q, k, v, mask=None):
        """Return (weighted values, attention weights); mask==0 positions are
        pushed to -1e9 before the softmax so they receive ~zero weight."""
        scaled_q = q / self.temperature
        scores = torch.matmul(scaled_q, k.transpose(2, 3))
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)
        weights = self.dropout(F.softmax(scores, dim=-1))
        output = torch.matmul(weights, v)
        return output, weights
| 674 | 24.961538 | 68 | py |
attention-is-all-you-need-pytorch | attention-is-all-you-need-pytorch-master/transformer/Models.py | ''' Define the Transformer model '''
import torch
import torch.nn as nn
import numpy as np
from transformer.Layers import EncoderLayer, DecoderLayer
__author__ = "Yu-Hsiang Huang"
def get_pad_mask(seq, pad_idx):
    """Boolean mask of shape (b, 1, len): True where the token is not padding."""
    return seq.ne(pad_idx).unsqueeze(-2)
def get_subsequent_mask(seq):
    ''' For masking out the subsequent info: lower-triangular boolean mask of
    shape (1, len, len) — position i may attend to positions <= i only. '''
    len_s = seq.size(1)
    ones = torch.ones((1, len_s, len_s), device=seq.device)
    return (1 - torch.triu(ones, diagonal=1)).bool()
class PositionalEncoding(nn.Module):
    """Adds the fixed sinusoidal position encoding to its input."""

    def __init__(self, d_hid, n_position=200):
        super(PositionalEncoding, self).__init__()
        # Not a parameter: registered as a buffer so it follows .to(device)
        # but is never updated by the optimizer.
        self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))

    def _get_sinusoid_encoding_table(self, n_position, d_hid):
        ''' Sinusoid position encoding table, shape (1, n_position, d_hid). '''
        # Vectorized replacement of the per-(pos, dim) Python double loop:
        # angle[pos, j] = pos / 10000^(2*(j//2)/d_hid), computed in float64
        # exactly as before, so the resulting table is unchanged.
        positions = np.arange(n_position)[:, None]   # (n_position, 1)
        hid_idx = np.arange(d_hid)[None, :]          # (1, d_hid)
        sinusoid_table = positions / np.power(10000, 2 * (hid_idx // 2) / d_hid)
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
        return torch.FloatTensor(sinusoid_table).unsqueeze(0)

    def forward(self, x):
        # Add the first x.size(1) rows of the table; detached clone so the
        # buffer can never accumulate gradients.
        return x + self.pos_table[:, :x.size(1)].clone().detach()
class Encoder(nn.Module):
    ''' A encoder model with self attention mechanism. '''
    def __init__(
            self, n_src_vocab, d_word_vec, n_layers, n_head, d_k, d_v,
            d_model, d_inner, pad_idx, dropout=0.1, n_position=200, scale_emb=False):
        super().__init__()
        self.src_word_emb = nn.Embedding(n_src_vocab, d_word_vec, padding_idx=pad_idx)
        self.position_enc = PositionalEncoding(d_word_vec, n_position=n_position)
        self.dropout = nn.Dropout(p=dropout)
        self.layer_stack = nn.ModuleList([
            EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
            for _ in range(n_layers)])
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.scale_emb = scale_emb
        self.d_model = d_model
    def forward(self, src_seq, src_mask, return_attns=False):
        """Encode src_seq; returns (enc_output,) or, when return_attns,
        (enc_output, list of per-layer self-attention maps)."""
        attn_list = []
        x = self.src_word_emb(src_seq)
        if self.scale_emb:
            x = x * self.d_model ** 0.5
        x = self.layer_norm(self.dropout(self.position_enc(x)))
        for layer in self.layer_stack:
            x, slf_attn = layer(x, slf_attn_mask=src_mask)
            if return_attns:
                attn_list.append(slf_attn)
        if return_attns:
            return x, attn_list
        return x,
class Decoder(nn.Module):
    ''' A decoder model with self attention mechanism. '''
    def __init__(
            self, n_trg_vocab, d_word_vec, n_layers, n_head, d_k, d_v,
            d_model, d_inner, pad_idx, n_position=200, dropout=0.1, scale_emb=False):
        super().__init__()
        self.trg_word_emb = nn.Embedding(n_trg_vocab, d_word_vec, padding_idx=pad_idx)
        self.position_enc = PositionalEncoding(d_word_vec, n_position=n_position)
        self.dropout = nn.Dropout(p=dropout)
        self.layer_stack = nn.ModuleList([
            DecoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
            for _ in range(n_layers)])
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.scale_emb = scale_emb
        self.d_model = d_model
    def forward(self, trg_seq, trg_mask, enc_output, src_mask, return_attns=False):
        """Decode trg_seq attending to enc_output; returns (dec_output,) or,
        when return_attns, (dec_output, self-attn list, enc-dec attn list)."""
        slf_attns, enc_attns = [], []
        x = self.trg_word_emb(trg_seq)
        if self.scale_emb:
            x = x * self.d_model ** 0.5
        x = self.layer_norm(self.dropout(self.position_enc(x)))
        for layer in self.layer_stack:
            x, slf_attn, enc_attn = layer(
                x, enc_output, slf_attn_mask=trg_mask, dec_enc_attn_mask=src_mask)
            if return_attns:
                slf_attns.append(slf_attn)
                enc_attns.append(enc_attn)
        if return_attns:
            return x, slf_attns, enc_attns
        return x,
class Transformer(nn.Module):
    ''' A sequence to sequence model with attention mechanism.

    Wraps an Encoder and a Decoder plus a final projection to target-vocab
    logits; optionally ties embedding and projection weights (paper sec. 3.4).
    '''
    def __init__(
            self, n_src_vocab, n_trg_vocab, src_pad_idx, trg_pad_idx,
            d_word_vec=512, d_model=512, d_inner=2048,
            n_layers=6, n_head=8, d_k=64, d_v=64, dropout=0.1, n_position=200,
            trg_emb_prj_weight_sharing=True, emb_src_trg_weight_sharing=True,
            scale_emb_or_prj='prj'):
        super().__init__()
        self.src_pad_idx, self.trg_pad_idx = src_pad_idx, trg_pad_idx
        # In section 3.4 of paper "Attention Is All You Need", there is such detail:
        # "In our model, we share the same weight matrix between the two
        # embedding layers and the pre-softmax linear transformation...
        # In the embedding layers, we multiply those weights by \sqrt{d_model}".
        #
        # Options here:
        #   'emb': multiply \sqrt{d_model} to embedding output
        #   'prj': multiply (\sqrt{d_model} ^ -1) to linear projection output
        #   'none': no multiplication
        assert scale_emb_or_prj in ['emb', 'prj', 'none']
        # Scaling only applies when weights are shared (otherwise 'none').
        scale_emb = (scale_emb_or_prj == 'emb') if trg_emb_prj_weight_sharing else False
        self.scale_prj = (scale_emb_or_prj == 'prj') if trg_emb_prj_weight_sharing else False
        self.d_model = d_model
        self.encoder = Encoder(
            n_src_vocab=n_src_vocab, n_position=n_position,
            d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
            n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,
            pad_idx=src_pad_idx, dropout=dropout, scale_emb=scale_emb)
        self.decoder = Decoder(
            n_trg_vocab=n_trg_vocab, n_position=n_position,
            d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
            n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,
            pad_idx=trg_pad_idx, dropout=dropout, scale_emb=scale_emb)
        self.trg_word_prj = nn.Linear(d_model, n_trg_vocab, bias=False)
        # Xavier-init every matrix-shaped parameter BEFORE weight tying below,
        # so tied tensors end up with the embedding's initialization.
        # NOTE(review): this also overwrites the padding_idx row that
        # nn.Embedding zeroes at construction — confirm that is intended.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        assert d_model == d_word_vec, \
        'To facilitate the residual connections, \
         the dimensions of all module outputs shall be the same.'
        if trg_emb_prj_weight_sharing:
            # Share the weight between target word embedding & last dense layer
            self.trg_word_prj.weight = self.decoder.trg_word_emb.weight
        if emb_src_trg_weight_sharing:
            # Source and target embeddings share one matrix (joint vocabulary).
            self.encoder.src_word_emb.weight = self.decoder.trg_word_emb.weight
    def forward(self, src_seq, trg_seq):
        """Return target-vocab logits flattened to shape (b*len_trg, n_trg_vocab)."""
        src_mask = get_pad_mask(src_seq, self.src_pad_idx)
        # Target positions may not attend to padding nor to future tokens.
        trg_mask = get_pad_mask(trg_seq, self.trg_pad_idx) & get_subsequent_mask(trg_seq)
        enc_output, *_ = self.encoder(src_seq, src_mask)
        dec_output, *_ = self.decoder(trg_seq, trg_mask, enc_output, src_mask)
        seq_logit = self.trg_word_prj(dec_output)
        if self.scale_prj:
            seq_logit *= self.d_model ** -0.5
        return seq_logit.view(-1, seq_logit.size(2))
| 7,678 | 37.58794 | 99 | py |
easyreg | easyreg-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
from version import get_git_version
# Package meta-data.
NAME = 'easyreg'
DESCRIPTION = 'Deep-learning image registration toolbox based on pyTorch'
URL = 'https://github.com/uncbiag/easyreg'
EMAIL = 'zyshen@cs.unc.edu'
AUTHOR = 'Zhengyang Shen'
REQUIRES_PYTHON = '>=3.7.0'
VERSION = get_git_version()  # version string derived from git metadata
# What packages are required for this module to be executed?
REQUIRED = [
    "numpy",
    "pynrrd",
    "future",
    "scikit-image",
    "cffi",
    "itk",
    "SimpleITK",
    "torch",
    "torchvision",
    "pandas",
    "matplotlib",
    "scipy",
    "openpyxl",
    "sphinx",
    "sphinx-gallery==0.3.1",
    "nbsphinx",
    "progressbar2",
    "h5py",
    "tensorboardX",
    "blosc",
    "ants",  # NOTE(review): ANTsPy is published on PyPI as "antspyx" — verify "ants" resolves
    "nibabel",
    "webcolors",
    "scikit-learn",
    "gdown"
]
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
# Only needed when git metadata was unavailable (VERSION falsy).
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, '__version__.py')) as f:
        exec(f.read(), about)  # trusted local file; populates about['__version__']
else:
    about['__version__'] = VERSION
class UploadCommand(Command):
    """Support setup.py upload."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Drop any stale build artifacts; absence of dist/ is fine.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass
        self.status('Building Source and Wheel (universal) distribution…')
        os.system(f'{sys.executable} setup.py sdist bdist_wheel --universal')
        self.status('Uploading the package to PyPI via Twine…')
        os.system('twine upload dist/*')
        self.status('Pushing git tags…')
        os.system('git tag v' + about['__version__'])
        os.system('git push --tags')
        sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    #package_data={'easyreg_apps': ['*.sh']},
    #
    # If your package is a single module, use this instead of 'packages':
    # py_modules=['mypackage'],
    # entry_points={
    #     'console_scripts': ['mycli=mymodule:cli'],
    # },
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license='Apache 2.0',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        # Fixed: 'Apache 2.0 License' is not a registered trove classifier
        # (PyPI rejects uploads with unknown classifiers).
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        # Fixed: advertise 3.7 to match REQUIRES_PYTHON ('>=3.7.0'), not 3.6.
        'Programming Language :: Python :: 3.7',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    # $ setup.py publish support.
    cmdclass={
        'upload': UploadCommand,
    },
)
| 4,359 | 26.080745 | 86 | py |
easyreg | easyreg-master/tools/draw_deformation.py | import numpy as np
import sys,os
os.environ["CUDA_VISIBLE_DEVICES"] = ''
from easyreg.viewers import *
from mermaid.utils import *
from mermaid.data_utils import *
import SimpleITK as sitk
from glob import glob
import os
sz = [160,200,200]
def get_image_list_to_draw(refer_folder,momentum_folder,img_type,source_target_folder,t_list):
    """
    Build a per-pair dict of every file path needed for plotting.

    Returns {pair_name: {"pair_name", "pair_path" (source/target image + label
    paths), "fluid_path" (warped image per t), "lfluid_path" (warped label per
    t), "phi_path", "phi1", "t", "momentum_path", "inv_phi_path"}}.

    :param refer_folder: folder with warped results; scanned for files ending in img_type
    :param momentum_folder: folder holding the *_0000Momentum.nii.gz files
    :param img_type: filename suffix identifying one warped image per pair
    :param source_target_folder: folder holding the original images/labels
    :param t_list: interpolation time points whose result files are collected
    """
    pair_path_list = glob(os.path.join(refer_folder,"*"+img_type))
    #pair_path_list = glob(os.path.join(refer_folder,"*9069761_image_9074437_image_9069761_image_9397988_image_0d0000_1d0000_t_1d00_image.nii.gz"))
    # Pair names encode the source/target ids at fixed underscore positions.
    pair_name_list = [get_file_name(path).replace(img_type.split(".")[0],"") for path in pair_path_list]
    source_name_list = [name.split("_")[0]+"_image" for name in pair_name_list]
    target_name_list = [name.split("_")[6]+"_image" for name in pair_name_list]
    momentum_list = [source_name+'_'+target_name+"_0000Momentum.nii.gz" for source_name, target_name in zip(source_name_list,target_name_list)]
    momentum_list = [os.path.join(momentum_folder,fname) for fname in momentum_list]
    source_path_list = [os.path.join(source_target_folder,source_name+'.nii.gz') for source_name in source_name_list]
    target_path_list = [os.path.join(source_target_folder,target_name+'.nii.gz') for target_name in target_name_list]
    # Label volumes live next to the images with a "masks" suffix.
    lsource_path_list = [path.replace("image.nii.gz","masks.nii.gz") for path in source_path_list]
    ltarget_path_list = [path.replace("image.nii.gz","masks.nii.gz") for path in target_path_list]
    # Time value is embedded in the filename with '.' replaced by 'd' (e.g. 1.00 -> 1d00).
    warped_path_list = [[os.path.join(refer_folder,pair_name+"_0d0000_1d0000_t_{}_image.nii.gz".format(str("{:.2f}".format(t)).replace(".","d")))for t in t_list] for pair_name in pair_name_list ]
    phi_path_list =[[path.replace("_image.nii.gz","_phi_map.nii.gz") for path in paths] for paths in warped_path_list]
    inv_phi_path_list =[[path.replace("_image.nii.gz","_inv_map.nii.gz") for path in paths] for paths in warped_path_list]
    lwarped_path_list = [[warped_path.replace("image.nii.gz","label.nii.gz") for warped_path in pair_warped_path] for pair_warped_path in warped_path_list]
    phi1_path = [path.replace("_image.nii.gz","_phi_map.nii.gz") for path in pair_path_list]
    dict_to_draw = {}
    for i, pair_name in enumerate(pair_name_list):
        dict_to_draw[pair_name] = {"pair_name": pair_name, "pair_path":[source_path_list[i],target_path_list[i],lsource_path_list[i],ltarget_path_list[i]]
            ,"fluid_path":warped_path_list[i], "lfluid_path":lwarped_path_list[i],"phi_path":phi_path_list[i],"phi1":phi1_path[i],"t":t_list,"momentum_path":momentum_list[i],"inv_phi_path":inv_phi_path_list[i]}
    return dict_to_draw
def draw_images(dict_to_draw):
    """Best-effort: draw every pair in dict_to_draw, skipping pairs that fail.

    :param dict_to_draw: mapping pair_name -> per-pair path dict
        (see get_image_list_to_draw); each value is passed to draw_image.
    """
    for pair_name, pair_detail in dict_to_draw.items():
        try:
            draw_image(pair_detail)
        except Exception as e:
            # Was a bare ``except: pass`` which also swallowed
            # KeyboardInterrupt/SystemExit and hid every failure; keep the
            # best-effort behavior but catch only Exception and report it.
            print("failed to draw pair {}: {}".format(pair_name, e))
def draw_image(single_image_dict):
    """Load one pair's volumes/maps and render linear vs. fluid interpolation.

    :param single_image_dict: one entry produced by get_image_list_to_draw,
        holding the source/target/label paths, per-t warped images/labels,
        per-t phi maps, the t=1 phi map and the t_list itself.
    """
    source_path = single_image_dict['pair_path'][0]
    target_path = single_image_dict['pair_path'][1]
    lsource_path = single_image_dict['pair_path'][2]
    ltarget_path = single_image_dict['pair_path'][3]
    fluid_path_list = single_image_dict['fluid_path']
    lfluid_path_list = single_image_dict['lfluid_path']
    phi_path_list = single_image_dict['phi_path']
    phi1_path =single_image_dict["phi1"]
    t_list =single_image_dict["t"]
    # Read a nifti volume into a numpy array.
    fr_sitk = lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))
    source = fr_sitk(source_path)
    lsource = fr_sitk(lsource_path)
    target = fr_sitk(target_path)
    ltarget = fr_sitk(ltarget_path)
    fluid_images = [fr_sitk(path) for path in fluid_path_list]
    lfluid_images = [fr_sitk(path) for path in lfluid_path_list]
    # Deformation maps are stored with the vector axis last; move it first.
    phis = [np.transpose(fr_sitk(path),[3,2,1,0]) for path in phi_path_list]
    phi1 = np.transpose(fr_sitk(phi1_path),[3,2,1,0])
    phi1_tensor = torch.Tensor(phi1[None])
    spacing = 1./(np.array(source.shape)-1)
    identity_map_np = identity_map_multiN([1,1]+sz,spacing)
    identity_map = torch.Tensor(identity_map_np)
    source_tensor = torch.Tensor(source)[None][None]
    lsource_tensor = torch.Tensor(lsource)[None][None]
    # Resample the maps to the image resolution when they were saved at a
    # lower resolution.
    if list(phi1_tensor.shape[2:])!=list(source.shape[2:]):
        fres = lambda x:resample_image(x, spacing, [1, 3] + list(lsource_tensor.shape[2:]))
        phi1_tensor, _ = fres(phi1_tensor)
        phis = [fres(torch.Tensor(phi[None]))[0] for phi in phis]
        phis =[phi[0].numpy() for phi in phis]
    # Linear baseline: scale the t=1 displacement by each t and warp with it.
    disp = phi1_tensor - identity_map
    linear_images = []
    llinear_images = []
    linear_phis = []
    for t in t_list:
        phi = identity_map + disp*t
        linear = compute_warped_image_multiNC(source_tensor,phi,spacing,spline_order=1,zero_boundary=True)
        # Labels use nearest-neighbor (spline_order=0) interpolation.
        llinear = compute_warped_image_multiNC(lsource_tensor,phi,spacing,spline_order=0,zero_boundary=True)
        linear_images.append(linear.numpy()[0,0])
        llinear_images.append(llinear.numpy()[0,0])
        linear_phis.append(phi.numpy()[0])
    draw_defomation(fluid_images, phis, linear_images, linear_phis,source,identity_map_np[0])
def draw_defomation(fluid_images,phis,linear_images,linear_phis,source,identity_map):
    """Show a 2x6 contour grid: top row linear interpolation results, bottom
    row fluid results, each overlaid with its deformation-grid contour.
    (source/identity_map are accepted for interface compatibility.)"""
    fig, ax = plt.subplots(2, 6, figsize=(45, 16))
    plt.style.use("bmh")
    for col in range(6):
        ImageViewer3D_Sliced_Contour(ax[0][col], linear_images[col], linear_phis[col], 0, '', showColorbar=False)
        ImageViewer3D_Sliced_Contour(ax[1][col], fluid_images[col], phis[col], 0, '', showColorbar=False)
    plt.axis('off')
    plt.clim(vmin=-1., vmax=1.)
    plt.show()
def view_2d_from_3d(img=None, phi=None,fpth=None,color=True):
    """Render a 2D slice of a 3D volume and/or a deformation-grid contour.

    :param img: 3D array to display; if None, a zero image shaped like phi[0] is used
    :param phi: deformation map drawn as a contour grid on top of img (optional)
    :param fpth: if given, save the figure there instead of showing it
    :param color: use the 'bmh' style when True, grayscale otherwise
    """
    fig, ax = plt.subplots(1,1)
    #plt.setp(plt.gcf(), 'facecolor', 'white')
    if not color:
        plt.style.use('grayscale')
    else:
        plt.style.use("bmh")
    ax.set_axis_off()
    if img is None:
        img = np.zeros_like(phi[0])
        ImageViewer3D_Sliced(ax, img, 0, '', False)
    elif phi is None:
        # Bug fix: an image passed without a deformation map was previously
        # never rendered at all, so plain image views saved blank figures.
        ImageViewer3D_Sliced(ax, img, 0, '', False)
    if phi is not None:
        ImageViewer3D_Sliced_Contour(ax, img, phi, 0, '', showColorbar=False)
    if fpth is not None:
        plt.savefig(fpth, dpi=100, bbox_inches='tight')
        plt.close('all')
    else:
        plt.show()
    plt.clf()
#
# img_type = "_0d0000_1d0000_t_1d00_image.nii.gz"
# t_list = [-3,-1,0.5,1,3,4]
# source_target_folder = "/playpen-raid/olut/Nifti_resampled_rescaled_2Left_Affine2atlas"
# #
# refer_folder = "/playpen-raid/zyshen/data/oai_reg/draw4"
# dict_to_draw = get_image_list_to_draw(refer_folder,"",img_type,source_target_folder,t_list)
# draw_images(dict_to_draw)
#
def read_img_phi(img_path_list, phi_path_list=None):
    """Load image volumes (and, optionally, deformation maps with the vector
    axis moved first) from disk; returns (img_list, phi_list or None)."""
    read = lambda pth: sitk.GetArrayFromImage(sitk.ReadImage(pth))
    img_list = [read(p) for p in img_path_list]
    if phi_path_list is None:
        return img_list, None
    phi_list = [np.transpose(read(p), (3, 2, 1, 0)) for p in phi_path_list]
    return img_list, phi_list
from tools.visual_tools import *
# NOTE(review): mid-file star import — consider moving it to the top of the file.
# --- Script: render per-pair pngs (source/target/momentum/warped/phi views) ---
img_type = "_0d0000_1d0000_t_1d00_image.nii.gz"
t_list = [-1, -0.5, 0.5,1, 1.5, 2.0]
source_target_folder = "/playpen-raid/olut/Nifti_resampled_rescaled_2Left_Affine2atlas"
#/playpen-raid/zyshen/data/oai_reg/train_with_10/momentum_lresol/9397988_image_9074437_image_0000Momentum.nii.gz
momentum_folder ="/playpen-raid/zyshen/data/oai_reg/train_with_10/momentum_lresol"
momentum_ftype = "_0000Momentum.nii.gz"
refer_folder = "/playpen-raid/zyshen/data/oai_reg/draw4"
dict_to_draw = get_image_list_to_draw(refer_folder,momentum_folder,img_type,source_target_folder,t_list)
output_folder = "/playpen-raid1/zyshen/data/oai_reg/draw_output4"
"""
dict_to_draw[pair_name] = {"pair_name": pair_name, "pair_path":[source_path_list[i],target_path_list[i],lsource_path_list[i],ltarget_path_list[i]]
            ,"fluid_path":warped_path_list[i], "lfluid_path":lwarped_path_list[i],"phi_path":phi_path_list[i],"phi1":phi1_path[i],"t":t_list,"momentum_path":momentum_list[i]}
for each pair name, we have source.png, target.png, momentum.png, phi_name.png, warped_name.png, l_warped_name.png """
for pair_name, pair_detail in dict_to_draw.items():
    # One output directory per pair; all pngs for the pair are written there.
    output_path = os.path.join(output_folder,pair_name)
    os.makedirs(output_path,exist_ok=True)
    source_path = pair_detail["pair_path"][0]
    target_path = pair_detail["pair_path"][1]
    lsource_path = pair_detail["pair_path"][2]
    momentum_path = pair_detail["momentum_path"]
    phi_path_list = pair_detail["phi_path"]
    inv_phi_path_list = pair_detail["inv_phi_path"]
    warped_path_list = pair_detail["fluid_path"]
    l_warped_path_list = pair_detail["lfluid_path"]
    source_save_path = os.path.join(output_path,"source.png")
    lsource_save_path = os.path.join(output_path,"lsource.png")
    target_save_path = os.path.join(output_path,"target.png")
    momentum_save_path = os.path.join(output_path,"momentum.png")
    warped_name_list = [get_file_name(pth) for pth in warped_path_list]
    warped_save_path_list = [os.path.join(output_path,fname) +"_warped.png" for fname in warped_name_list]
    lwarped_save_path_list = [os.path.join(output_path,fname) + "_lwarped.png" for fname in warped_name_list]
    lwarped_phi_save_path_list = [os.path.join(output_path,fname) + "_lwarpedphi.png" for fname in warped_name_list]
    lwarped_invphi_save_path_list = [os.path.join(output_path,fname) + "_lwarpedinvphi.png" for fname in warped_name_list]
    phi_save_path_list = [os.path.join(output_path,fname) + "_phi.png" for fname in warped_name_list]
    inv_phi_save_path_list = [os.path.join(output_path,fname) + "_inv_phi.png" for fname in warped_name_list]
    img_phi_save_path_list = [os.path.join(output_path,fname) + "_imgphi.png" for fname in warped_name_list]
    # f: read a nifti volume as numpy; f_v: same but axis-reversed (vector maps).
    f = lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))
    f_v = lambda x: np.transpose(f(x),[3,2,1,0])
    view_2d_from_3d(img=f(source_path),fpth=source_save_path)
    view_2d_from_3d(img=f(target_path),fpth=target_save_path)
    view_2d_from_3d(img=f(lsource_path),fpth=lsource_save_path)
    momentum = f_v(momentum_path)
    momentum = np.sum(momentum ** 2, 1)  # squared magnitude over the vector axis
    view_2d_from_3d(img=momentum, fpth=momentum_save_path,color=True)
    l = f(lsource_path)
    for i in range(len(warped_name_list)):
        warped = f(warped_path_list[i])
        view_2d_from_3d(img=warped, fpth=warped_save_path_list[i])
        view_2d_from_3d(img=f(l_warped_path_list[i]), fpth=lwarped_save_path_list[i])
        view_2d_from_3d(img=f(l_warped_path_list[i]),phi=f_v(phi_path_list[i]), fpth=lwarped_phi_save_path_list[i])
        view_2d_from_3d(phi=f_v(phi_path_list[i]), fpth=phi_save_path_list[i])
        try:
            view_2d_from_3d(img=l, phi=f_v(inv_phi_path_list[i]), fpth=lwarped_invphi_save_path_list[i])
            view_2d_from_3d(phi=f_v(inv_phi_path_list[i]), fpth=inv_phi_save_path_list[i])
        except:
            # NOTE(review): bare except silently skips pairs without inverse
            # maps (and also swallows KeyboardInterrupt) — prefer `except Exception`.
            pass
        view_2d_from_3d(img =warped ,phi=f_v(phi_path_list[i]), fpth=img_phi_save_path_list[i])
| 12,626 | 52.731915 | 210 | py |
easyreg | easyreg-master/tools/transform_disp_into_torch_form.py | import nibabel as nib
import numpy as np
def transform_disp_into_torch_form(inv_transform_file):
    """Load a displacement field and move the 3-vector axis last.

    Expects the stored array to be channel-first (shape ``(3, ...)``; asserted)
    and returns it fully axis-reversed via ``transpose([3, 2, 1, 0])``.
    """
    field = nib.load(inv_transform_file).get_fdata()
    assert field.shape[0] == 3
    return np.transpose(field, [3, 2, 1, 0])
| 271 | 26.2 | 55 | py |
easyreg | easyreg-master/tools/draw_deformation_2d.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = ''
from tools.draw_deformation_viewers import *
from mermaid.utils import *
from mermaid.data_utils import *
from glob import glob
sz = [160,200,200]
def get_image_list_to_draw(refer_folder,momentum_folder,img_type,source_target_folder,t_list):
    """
    Build a per-pair dict of every file path needed for plotting, for the
    two-target (interpolation-weight) variant of the experiment.

    Returns {pair_name: {"pair_name", "pair_path" (source image/label plus a
    LIST of two target images/labels), "fluid_path", "lfluid_path",
    "phi_path", "phi1", "t", "momentum_path" (one per target),
    "inv_phi_path"}}.

    :param refer_folder: folder with warped results; scanned for files ending in img_type
    :param momentum_folder: folder holding the *_0000Momentum.nii.gz files
    :param img_type: filename suffix identifying one warped image per pair
    :param source_target_folder: folder holding the original images/labels
    :param t_list: interpolation time points (kept in the returned dict)
    """
    pair_path_list = glob(os.path.join(refer_folder,"*"+img_type))
    #pair_path_list = glob(os.path.join(refer_folder,"*9069761_image_9074437_image_9069761_image_9397988_image_0d0000_1d0000_t_1d00_image.nii.gz"))
    # Pair names encode source and BOTH target ids at fixed underscore positions.
    pair_name_list = [get_file_name(path).replace(img_type.split(".")[0],"") for path in pair_path_list]
    source_name_list = [name.split("_")[0]+"_image" for name in pair_name_list]
    target_name_list = [[name.split("_")[2]+"_image",name.split("_")[6]+"_image"] for name in pair_name_list]
    momentum_list = [[source_name+'_'+target_name+"_0000Momentum.nii.gz" for target_name in t_name_list] for source_name, t_name_list in zip(source_name_list,target_name_list) ]
    momentum_list = [[os.path.join(momentum_folder,fname) for fname in fname_list] for fname_list in momentum_list]
    source_path_list = [os.path.join(source_target_folder,source_name+'.nii.gz') for source_name in source_name_list]
    target_path_list = [[os.path.join(source_target_folder,target_name+'.nii.gz') for target_name in t_name_list ] for t_name_list in target_name_list]
    lsource_path_list = [path.replace("image.nii.gz","masks.nii.gz") for path in source_path_list]
    ltarget_path_list = [[path.replace("image.nii.gz","masks.nii.gz") for path in path_list] for path_list in target_path_list]
    #9905156_image_9074437_image_9905156_image_9397988_image_0d5000_0d5000_t_1d00_phi_map.nii.gz
    # fs/ft format floats for the filename pattern with '.' replaced by 'd'
    # (fs: 4 decimals for the weights, ft: 2 decimals for the time point).
    fs = lambda x: str("{:.4f}".format(x)).replace(".","d")
    ft = lambda x: str("{:.2f}".format(x)).replace(".","d")
    weight_list = [0,0.25,0.5,0.75,1.0]
    # Cartesian product: one warped image per (time point, weight) combination.
    warped_path_list = [[os.path.join(refer_folder,pair_name+"_{}_{}_t_{}_image.nii.gz".format(fs(i),fs(1.0-i),ft(t))) for t in [-1,-0.5, 0.5,1,2] for i in weight_list ] for pair_name in pair_name_list ]
    phi_path_list =[[path.replace("_image.nii.gz","_phi_map.nii.gz") for path in paths] for paths in warped_path_list]
    inv_phi_path_list =[[path.replace("_image.nii.gz","_inv_map.nii.gz") for path in paths] for paths in warped_path_list]
    lwarped_path_list = [[warped_path.replace("image.nii.gz","label.nii.gz") for warped_path in pair_warped_path] for pair_warped_path in warped_path_list]
    phi1_path = [path.replace("_image.nii.gz","_phi_map.nii.gz") for path in pair_path_list]
    dict_to_draw = {}
    for i, pair_name in enumerate(pair_name_list):
        dict_to_draw[pair_name] = {"pair_name": pair_name, "pair_path":[source_path_list[i],target_path_list[i],lsource_path_list[i],ltarget_path_list[i]]
            ,"fluid_path":warped_path_list[i], "lfluid_path":lwarped_path_list[i],"phi_path":phi_path_list[i],"phi1":phi1_path[i],"t":t_list,"momentum_path":momentum_list[i],"inv_phi_path":inv_phi_path_list[i]}
    return dict_to_draw
def draw_images(dict_to_draw,saving_path=None):
    """Best-effort: draw every pair in dict_to_draw, skipping pairs that fail.

    :param dict_to_draw: mapping pair_name -> per-pair path dict
        (see get_image_list_to_draw); each value is passed to draw_image.
    :param saving_path: forwarded to draw_image; figures are saved there
        instead of shown when not None.
    """
    for pair_name in dict_to_draw:
        try:
            draw_image(dict_to_draw[pair_name],saving_path)
        except Exception as e:
            # Was a bare ``except: pass`` which also swallowed
            # KeyboardInterrupt/SystemExit and hid every failure; keep the
            # best-effort behavior but catch only Exception and report it.
            print("failed to draw pair {}: {}".format(pair_name, e))
def draw_image(single_image_dict,saving_path=None):
    """Load one pair's volumes/maps and render its multi-target deformation
    grid; saved as <pair_name>.png under saving_path when given, else shown.

    :param single_image_dict: one entry produced by get_image_list_to_draw
        (two-target variant: targets and their labels are lists of two paths).
    :param saving_path: output directory for the figure, or None to display it.
    """
    source_path = single_image_dict['pair_path'][0]
    target_path = single_image_dict['pair_path'][1]
    pair_name = single_image_dict['pair_name']
    lsource_path = single_image_dict['pair_path'][2]
    ltarget_path = single_image_dict['pair_path'][3]
    fluid_path_list = single_image_dict['fluid_path']
    lfluid_path_list = single_image_dict['lfluid_path']
    phi_path_list = single_image_dict['phi_path']
    phi1_path =single_image_dict["phi1"]
    t_list =single_image_dict["t"]
    # Read a nifti volume into a numpy array.
    fr_sitk = lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))
    source = fr_sitk(source_path)
    lsource = fr_sitk(lsource_path)
    target = [fr_sitk(path) for path in target_path]
    ltarget = [fr_sitk(path) for path in ltarget_path]
    fluid_images = [fr_sitk(path) for path in fluid_path_list]
    lfluid_images = [fr_sitk(path) for path in lfluid_path_list]
    # Deformation maps are stored with the vector axis last; move it first.
    phis = [np.transpose(fr_sitk(path),[3,2,1,0]) for path in phi_path_list]
    #phi1 = np.transpose(fr_sitk(phi1_path),[3,2,1,0])
    #phi1_tensor = torch.Tensor(phi1[None])
    spacing = 1./(np.array(source.shape)-1)
    identity_map_np = identity_map_multiN([1,1]+sz,spacing)
    identity_map = torch.Tensor(identity_map_np)
    # The linear-interpolation baseline from the 3d script is disabled here.
    # source_tensor = torch.Tensor(source)[None][None]
    # lsource_tensor = torch.Tensor(lsource)[None][None]
    # if list(phi1_tensor.shape[2:])!=list(source.shape[2:]):
    #     fres = lambda x:resample_image(x, spacing, [1, 3] + list(lsource_tensor.shape[2:]))
    #     phi1_tensor, _ = fres(phi1_tensor)
    #     phis = [fres(torch.Tensor(phi[None]))[0] for phi in phis]
    #     phis =[phi[0].numpy() for phi in phis]
    # disp = phi1_tensor - identity_map
    # linear_images = []
    # llinear_images = []
    # linear_phis = []
    # for t in t_list:
    #     phi = identity_map + disp*t
    #     linear = compute_warped_image_multiNC(source_tensor,phi,spacing,spline_order=1,zero_boundary=True)
    #     llinear = compute_warped_image_multiNC(lsource_tensor,phi,spacing,spline_order=0,zero_boundary=True)
    #     linear_images.append(linear.numpy()[0,0])
    #     llinear_images.append(llinear.numpy()[0,0])
    #     linear_phis.append(phi.numpy()[0])
    if saving_path is not None:
        fpth = os.path.join(saving_path,pair_name+".png")
    else:
        fpth = None
    draw_defomation(fluid_images, phis,source,target,identity_map_np[0],fpth)
def draw_defomation(fluid_images, phis,source,target_list,identity_map,fpth):
    """Render a 6x7 comparison grid of deformed images and save/show it.

    Each row shows target_list[1] (col 0), five contour-overlaid images
    (cols 1-5) and target_list[0] (col 6).  Rows 0, 1, 3, 4, 5 display
    consecutive groups of five entries from ``fluid_images`` warped by
    ``phis[0..4]``; row 2 displays the undeformed ``source`` overlaid with
    the identity grid as a reference.

    :param fluid_images: flat list of at least 25 volumes, consumed in
        groups of five per non-reference row
    :param phis: five transform maps, one per column 1-5
    :param source: undeformed source volume (row 2)
    :param target_list: two reference volumes for the outer columns
    :param identity_map: identity transform shown on row 2
    :param fpth: output png path; when None the figure is shown instead
    """
    fig, ax = plt.subplots(6, 7, figsize=(35, 30))
    plt.style.use("bmh")
    # Row index -> offset of that row's first entry in fluid_images.
    # Row 2 is the identity/source reference row and has no offset.
    row_offset = {0: 0, 1: 5, 3: 10, 4: 15, 5: 20}
    for row in range(6):
        # Outer columns always show the two targets for visual reference.
        ImageViewer3D_Sliced(ax[row][0], target_list[1], 0, '', showColorbar=False)
        for col in range(1, 6):
            if row == 2:
                # Reference row: source with the undeformed grid.
                ImageViewer3D_Sliced_Contour(ax[row][col], source, identity_map, 0, '', showColorbar=False)
            else:
                offset = row_offset[row]
                ImageViewer3D_Sliced_Contour(ax[row][col], fluid_images[offset + col - 1],
                                             phis[col - 1], 0, '', showColorbar=False)
        ImageViewer3D_Sliced(ax[row][6], target_list[0], 0, '', showColorbar=False)
    plt.axis('off')
    plt.clim(vmin=-1., vmax=1.)
    if fpth is not None:
        plt.savefig(fpth, dpi=100, bbox_inches='tight')
        plt.close('all')
    else:
        plt.show()
    plt.clf()
def view_2d_from_3d(img=None, phi=None, fpth=None, color=True):
    """Display (or save to *fpth*) one 2D slice view of a 3D volume,
    optionally overlaying the deformation grid *phi*.

    When *img* is omitted a zero image shaped like ``phi[0]`` is used so
    that the grid can be shown on its own.  ``color=False`` switches the
    plot to grayscale.
    """
    fig, ax = plt.subplots(1, 1)
    plt.style.use("bmh" if color else 'grayscale')
    ax.set_axis_off()
    if img is None:
        img = np.zeros_like(phi[0])
    ImageViewer3D_Sliced(ax, img, 0, '', False)
    if phi is not None:
        ImageViewer3D_Sliced_Contour(ax, img, phi, 0, '', showColorbar=False)
    if fpth is None:
        plt.show()
    else:
        plt.savefig(fpth, dpi=100, bbox_inches='tight')
        plt.close('all')
    plt.clf()
#
# img_type = "_0d0000_1d0000_t_1d00_image.nii.gz"
# t_list = [-3,-1,0.5,1,3,4]
# source_target_folder = "/playpen-raid/olut/Nifti_resampled_rescaled_2Left_Affine2atlas"
# #
# refer_folder = "/playpen-raid/zyshen/data/oai_reg/draw4"
# dict_to_draw = get_image_list_to_draw(refer_folder,"",img_type,source_target_folder,t_list)
# draw_images(dict_to_draw)
#
def read_img_phi(img_path_list, phi_path_list=None):
    """Load images (and, optionally, transform maps) from disk.

    Transform arrays are re-ordered with ``transpose(3, 2, 1, 0)`` before
    being returned; when *phi_path_list* is None the second return value
    is None.
    """
    load = lambda pth: sitk.GetArrayFromImage(sitk.ReadImage(pth))
    img_list = [load(pth) for pth in img_path_list]
    if phi_path_list is None:
        return img_list, None
    phi_list = [np.transpose(load(pth), (3, 2, 1, 0)) for pth in phi_path_list]
    return img_list, phi_list
from tools.visual_tools import *
# --- script configuration (site-specific paths; edit before running) ---
# file-name suffix identifying the interpolated images to collect
img_type = "_0d0000_1d0000_t_1d00_image.nii.gz"
# time points along the registration path to visualize
t_list = [-1, -0.5, 0.5,1, 1.5, 2.0]
source_target_folder = "/playpen-raid/olut/Nifti_resampled_rescaled_2Left_Affine2atlas"
#/playpen-raid/zyshen/data/oai_reg/train_with_10/momentum_lresol/9397988_image_9074437_image_0000Momentum.nii.gz
momentum_folder ="/playpen-raid/zyshen/data/oai_reg/train_with_10/momentum_lresol"
momentum_ftype = "_0000Momentum.nii.gz"
refer_folder = "/playpen-raid1/zyshen/data/oai_reg/draw2"
# collect every pair's images/transforms, then draw and save the figures
dict_to_draw = get_image_list_to_draw(refer_folder,momentum_folder,img_type,source_target_folder,t_list)
saving_path = "/playpen-raid1/zyshen/data/oai_reg/draw_2d"
os.makedirs(saving_path,exist_ok=True)
draw_images(dict_to_draw,None)
output_folder = "/playpen-raid1/zyshen/data/oai_reg/draw_output5"
"""
dict_to_draw[pair_name] = {"pair_name": pair_name, "pair_path":[source_path_list[i],target_path_list[i],lsource_path_list[i],ltarget_path_list[i]]
,"fluid_path":warped_path_list[i], "lfluid_path":lwarped_path_list[i],"phi_path":phi_path_list[i],"phi1":phi1_path[i],"t":t_list,"momentum_path":momentum_list[i]}
# for each pair name, we have source.png, target.png, momentum.png, phi_name.png, warped_name.png, l_warped_name.png """
# for pair_name, pair_detail in dict_to_draw.items():
# output_path = os.path.join(output_folder,pair_name)
# os.makedirs(output_path,exist_ok=True)
# source_path = pair_detail["pair_path"][0]
# target_path = pair_detail["pair_path"][1]
# lsource_path = pair_detail["pair_path"][2]
# momentum_path = pair_detail["momentum_path"]
# phi_path_list = pair_detail["phi_path"]
# inv_phi_path_list = pair_detail["inv_phi_path"]
# warped_path_list = pair_detail["fluid_path"]
# l_warped_path_list = pair_detail["lfluid_path"]
# source_save_path = os.path.join(output_path,"source.png")
# lsource_save_path = os.path.join(output_path,"lsource.png")
# target_save_path = os.path.join(output_path,"target.png")
# momentum_save_path = os.path.join(output_path,"momentum.png")
# warped_name_list = [get_file_name(pth) for pth in warped_path_list]
# warped_save_path_list = [os.path.join(output_path,fname) +"_warped.png" for fname in warped_name_list]
# lwarped_save_path_list = [os.path.join(output_path,fname) + "_lwarped.png" for fname in warped_name_list]
# lwarped_phi_save_path_list = [os.path.join(output_path,fname) + "_lwarpedphi.png" for fname in warped_name_list]
# lwarped_invphi_save_path_list = [os.path.join(output_path,fname) + "_lwarpedinvphi.png" for fname in warped_name_list]
# phi_save_path_list = [os.path.join(output_path,fname) + "_phi.png" for fname in warped_name_list]
# inv_phi_save_path_list = [os.path.join(output_path,fname) + "_inv_phi.png" for fname in warped_name_list]
# img_phi_save_path_list = [os.path.join(output_path,fname) + "_imgphi.png" for fname in warped_name_list]
# f = lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))
# f_v = lambda x: np.transpose(f(x),[3,2,1,0])
# view_2d_from_3d(img=f(source_path),fpth=source_save_path)
# view_2d_from_3d(img=f(target_path),fpth=target_save_path)
# view_2d_from_3d(img=f(lsource_path),fpth=lsource_save_path)
# momentum = f_v(momentum_path)
# momentum = np.sum(momentum ** 2, 1)
# view_2d_from_3d(img=momentum, fpth=momentum_save_path,color=True)
# l = f(lsource_path)
# for i in range(len(warped_name_list)):
# warped = f(warped_path_list[i])
# view_2d_from_3d(img=warped, fpth=warped_save_path_list[i])
# view_2d_from_3d(img=f(l_warped_path_list[i]), fpth=lwarped_save_path_list[i])
# view_2d_from_3d(img=f(l_warped_path_list[i]),phi=f_v(phi_path_list[i]), fpth=lwarped_phi_save_path_list[i])
# view_2d_from_3d(phi=f_v(phi_path_list[i]), fpth=phi_save_path_list[i])
# try:
# view_2d_from_3d(img=l, phi=f_v(inv_phi_path_list[i]), fpth=lwarped_invphi_save_path_list[i])
# view_2d_from_3d(phi=f_v(inv_phi_path_list[i]), fpth=inv_phi_save_path_list[i])
# except:
# pass
# view_2d_from_3d(img =warped ,phi=f_v(phi_path_list[i]), fpth=img_phi_save_path_list[i])
#
#
#
#
# disp_pth = '/playpen-raid/zyshen/data/reg_debug_labeled_oai_reg_inter/visualize_affine/records/3D/9003406_20060322_SAG_3D_DESS_LEFT_016610899303_image_9357383_20040927_SAG_3D_DESS_LEFT_016610250606_imagemap.nii.gz'
# img_pth = '/playpen-raid/zyshen/data/reg_debug_labeled_oai_reg_inter/visualize_affine/records/3D/9003406_20060322_SAG_3D_DESS_LEFT_016610899303_image_9357383_20040927_SAG_3D_DESS_LEFT_016610250606_image_reproduce.nii.gz'
# disp = sitk.ReadImage(disp_pth)
# disp = sitk.GetArrayFromImage(disp)
# img = sitk.GetArrayFromImage(sitk.ReadImage(img_pth))
# #disp = np.transpose(disp,(3,2,1,0))
#
#
# spacing = 1. / (sz - 1)
# identity_map = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
# grid = identity_map+ disp
# grid[0] = grid[0]*spacing[0]
# grid[1] = grid[1]*spacing[1]
# grid[2] = grid[2]*spacing[2]
# grid = grid*2-1
# print(np.max(grid), np.min(grid))
#
#
# fig,ax = plt.subplots(2,7,figsize=(50, 30))
# # img = np.zeros_like(img)
# img[1,:,1]=1
# plt.setp(plt.gcf(), 'facecolor', 'white')
# plt.style.use('grayscale')
#
# ivx = ImageViewer3D_Sliced_Contour( ax[0][0], img,grid, 0, '',showColorbar=True)
# ivy = ImageViewer3D_Sliced_Contour( ax[0][1], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[0][2], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[0][3], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[0][4], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[0][5], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[0][6], img,grid, 0, '',showColorbar=True)
#
# ivx = ImageViewer3D_Sliced_Contour( ax[1][0], img,grid, 0, '',showColorbar=True)
# ivy = ImageViewer3D_Sliced_Contour( ax[1][1], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[1][2], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[1][3], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[1][4], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[1][5], img,grid, 0, '',showColorbar=True)
# ivz = ImageViewer3D_Sliced_Contour( ax[1][6], img,grid, 0, '',showColorbar=True)
#
# # feh = FigureEventHandler(fig)
# #
# # feh.add_axes_event('button_press_event', ax[0], ivx.on_mouse_press)
# # feh.add_axes_event('button_press_event', ax[1], ivy.on_mouse_press)
# # feh.add_axes_event('button_press_event', ax[2], ivz.on_mouse_press)
# #
# # feh.synchronize([ax[0], ax[1], ax[2]])
# plt.clim(vmin=-1., vmax=1.)
# plt.show()
#
| 18,322 | 52.110145 | 222 | py |
easyreg | easyreg-master/tools/transform_between_mermaid_and_itk.py | import SimpleITK as sitk
import numpy as np
import os
from easyreg.utils import resample_image
from mermaid.utils import compute_warped_image_multiNC
import torch
from easyreg.net_utils import gen_identity_map
from easyreg.demons_utils import sitk_grid_sampling
import tools.image_rescale as ires
# img_org_path = "/playpen-raid1/zyshen/debug/9352883_20051123_SAG_3D_DESS_LEFT_016610798103_image.nii.gz"
# img_tar_path = "/playpen-raid1/zyshen/debug/9403165_20060316_SAG_3D_DESS_LEFT_016610900302_image.nii.gz"
# moving_path = "/playpen-raid1/zyshen/debug/9352883_20051123_SAG_3D_DESS_LEFT_016610798103_image_cleaned.nii.gz"
# target_path = "/playpen-raid1/zyshen/debug/9403165_20060316_SAG_3D_DESS_LEFT_016610900302_image_cleaned.nii.gz"
#
# moving_org = sitk.ReadImage(img_org_path)
# spacing_ref = moving_org.GetSpacing()
# direc_ref = moving_org.GetDirection()
# orig_ref = moving_org.GetOrigin()
# img_itk = sitk.GetImageFromArray(sitk.GetArrayFromImage(moving_org))
# img_itk.SetSpacing(spacing_ref)
# img_itk.SetDirection(direc_ref)
# img_itk.SetOrigin(orig_ref)
# sitk.WriteImage(img_itk,moving_path)
#
#
# target_org = sitk.ReadImage(img_tar_path)
# spacing_ref = target_org.GetSpacing()
# spacing_ref = tuple(s*2 for s in spacing_ref)
# direc_ref = target_org.GetDirection()
# orig_ref = target_org.GetOrigin()
# img_itk = sitk.GetImageFromArray(sitk.GetArrayFromImage(target_org))
# img_itk.SetSpacing(spacing_ref)
# img_itk.SetDirection(direc_ref)
# img_itk.SetOrigin(orig_ref)
# sitk.WriteImage(img_itk,target_path)
#
#
# moving = sitk.ReadImage(moving_path)
# target = sitk.ReadImage(target_path)
# moving_np = sitk.GetArrayFromImage(moving)
#
#
# img_sz = np.array(moving_np.shape)
# spacing = 1./(np.array(img_sz)-1)
#
# id_np= gen_identity_map(img_sz, resize_factor=1., normalized=True)
# id_np = (id_np+1.)/2
# #disp_np = np.zeros([3]+list(moving_np.shape)).astype(np.float32)
# disp_np = np.random.rand(3,80,192,192).astype(np.float32)/20
# disp_np[0] = disp_np[0]+0.03
# disp_np[1] = disp_np[1]+0.05
# disp_np[2] = disp_np[2]+0.09
# phi_np = id_np + disp_np
#
# phi = torch.Tensor(phi_np)
# warped_mermaid = compute_warped_image_multiNC(torch.Tensor(moving_np)[None][None],phi[None],spacing,spline_order=1,zero_boundary=True)
# ires.save_image_with_given_reference(warped_mermaid,[target_path],"/playpen-raid1/zyshen/debug",["9352883_20051123_SAG_3D_DESS_LEFT_016610798103_image_warped"])
#
# trans =ires.save_transform_itk(disp_np[None], spacing,[moving_path],[target_path],"/playpen-raid1/zyshen/debug",["9352883_20051123_SAG_3D_DESS_LEFT_016610798103_image"] )
# warped_itk = sitk_grid_sampling(target, moving, trans)
# sitk_warped_path = "/playpen-raid1/zyshen/debug/9352883_20051123_SAG_3D_DESS_LEFT_016610798103_image_warped_sitk.nii.gz"
# sitk.WriteImage(warped_itk, sitk_warped_path)
# print("Done")
############### reconstruct the lung ##############33
"""
the behavior of the itk is not clear, it would first move the source based on a displacement map with the moving size (here we assumed, maybe wrong should also check the map with target size) but apply on the target image
"""
# Inputs produced by a previous registration run (site-specific paths).
moving_path ="/playpen-raid1/zyshen/data/demo_for_lung_reg/reg/res/records/original_sz/11769X_EXP_STD_BWH_COPD_img_11769X_INSP_STD_BWH_COPD_img_moving.nii.gz"
target_path ="/playpen-raid1/zyshen/data/demo_for_lung_reg/reg/res/records/original_sz/11769X_EXP_STD_BWH_COPD_img_11769X_INSP_STD_BWH_COPD_img_target.nii.gz"
disp_path ="/playpen-raid1/zyshen/data/demo_for_lung_reg/reg/res/records/original_sz/11769X_EXP_STD_BWH_COPD_img_11769X_INSP_STD_BWH_COPD_img_disp.h5"
inv_disp_path ="/playpen-raid1/zyshen/data/demo_for_lung_reg/reg/res/records/original_sz/11769X_EXP_STD_BWH_COPD_img_11769X_INSP_STD_BWH_COPD_img_inv_disp.h5"
mermaid_transform_path = "/playpen-raid1/zyshen/data/demo_for_lung_reg/reg/res/records/original_sz/11769X_EXP_STD_BWH_COPD_img_11769X_INSP_STD_BWH_COPD_img_phi.nii.gz"
mermaid_inv_transform_path = "/playpen-raid1/zyshen/data/demo_for_lung_reg/reg/res/records/original_sz/11769X_EXP_STD_BWH_COPD_img_11769X_INSP_STD_BWH_COPD_img_inv_phi.nii.gz"
# --- warp with itk: forward (moving -> target) and inverse (target -> moving) ---
moving_itk = sitk.ReadImage(moving_path)
target_itk = sitk.ReadImage(target_path)
trans_itk = sitk.ReadTransform(disp_path)
warped_itk = sitk_grid_sampling(target_itk,moving_itk, trans_itk)
inv_trans_itk = sitk.ReadTransform(inv_disp_path)
inv_warped_itk = sitk_grid_sampling(moving_itk,target_itk, inv_trans_itk)
# --- warp with mermaid on the same pair, using the saved phi / inverse phi ---
moving_np = sitk.GetArrayFromImage(moving_itk).astype(np.float32)
target_np = sitk.GetArrayFromImage(target_itk).astype(np.float32)
# move the trailing channel axis first and reverse the spatial axis order
mermaid_phi = sitk.GetArrayFromImage(sitk.ReadImage(mermaid_transform_path)).transpose(3, 2, 1,0)
mermaid_inv_phi = sitk.GetArrayFromImage(sitk.ReadImage(mermaid_inv_transform_path)).transpose(3, 2, 1,0)
phi_sz = np.array(mermaid_phi.shape)
spacing = 1./(np.array(phi_sz[1:])-1)  # normalized grid spacing 1/(size-1)
moving = torch.from_numpy(moving_np[None][None])
mermaid_phi = torch.from_numpy(mermaid_phi[None])
warped_mermaid = compute_warped_image_multiNC(moving, mermaid_phi, spacing, 1, zero_boundary=True)
inv_phi_sz = np.array(mermaid_inv_phi.shape)
spacing = 1./(np.array(inv_phi_sz[1:])-1)
target = torch.from_numpy(target_np[None][None])
mermaid_inv_phi = torch.from_numpy(mermaid_inv_phi[None])
inv_warped_mermaid = compute_warped_image_multiNC(target, mermaid_inv_phi, spacing, 1, zero_boundary=True)
# --- save both warps so the itk and mermaid results can be compared ---
output_path = "/playpen-raid1/zyshen/data/demo_for_lung_reg"
sitk.WriteImage(warped_itk, os.path.join(output_path,"warped_itk.nii.gz"))
ires.save_image_with_given_reference(warped_mermaid,reference_list=[target_path],path=output_path,fname=["warped_mermaid"])
sitk.WriteImage(inv_warped_itk, os.path.join(output_path,"inv_warped_itk.nii.gz"))
ires.save_image_with_given_reference(inv_warped_mermaid,reference_list=[moving_path],path=output_path,fname=["inv_warped_mermaid"])
| 5,800 | 50.794643 | 221 | py |
easyreg | easyreg-master/tools/warp_image_label.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = ''
from easyreg.reg_data_utils import read_txt_into_list, get_file_name
from tools.image_rescale import save_image_with_given_reference
import SimpleITK as sitk
import torch
import numpy as np
from glob import glob
from mermaid.utils import compute_warped_image_multiNC, resample_image
def compute_warped_image_label(img_label_txt_pth, phi_pth, phi_type, saving_pth):
    """Warp every (image, label) pair with all of its matching transforms.

    For each line ``[image_path, label_path]`` in *img_label_txt_pth*, all
    transforms in *phi_pth* matching the glob *phi_type* whose file name
    starts with the image's file name are applied.  Images are warped with
    trilinear interpolation, labels with nearest neighbor, and results are
    written into *saving_pth* with ``_warped`` / ``_label`` suffixes.

    :param img_label_txt_pth: txt list file of image/label path pairs
    :param phi_pth: folder containing the transform maps
    :param phi_type: glob pattern for the transform files, e.g. '*_phi.nii.gz'
    :param saving_pth: output folder
    """
    img_label_pth_list = read_txt_into_list(img_label_txt_pth)
    phi_pth_list = glob(os.path.join(phi_pth, phi_type))
    read_np = lambda pth: sitk.GetArrayFromImage(sitk.ReadImage(pth))
    for pair in img_label_pth_list:
        fname = get_file_name(pair[0])
        # transforms generated from this image share its file-name prefix
        phi_sub_list = [p for p in phi_pth_list if get_file_name(p).startswith(fname)]
        num_aug = len(phi_sub_list)
        if num_aug == 0:
            # nothing to warp with; previously this crashed in np.stack
            print("warning: no transform found for {}, skipped".format(fname))
            continue
        # load lazily per pair instead of holding every volume in memory
        img = torch.Tensor(read_np(pair[0])[None][None]).repeat(num_aug, 1, 1, 1, 1)
        label = torch.Tensor(read_np(pair[1])[None][None]).repeat(num_aug, 1, 1, 1, 1)
        phi = np.stack([read_np(p) for p in phi_sub_list], 0)
        # move the trailing channel axis first and reverse the spatial order
        phi = torch.Tensor(np.transpose(phi, (0, 4, 3, 2, 1)))
        sz = np.array(img.shape[2:])
        spacing = 1. / (sz - 1)
        # bring the (possibly low-resolution) transforms onto the image grid
        phi, _ = resample_image(phi, spacing, [1, 3] + list(img.shape[2:]))
        warped_img = compute_warped_image_multiNC(img, phi, spacing, spline_order=1, zero_boundary=True)
        warped_label = compute_warped_image_multiNC(label, phi, spacing, spline_order=0, zero_boundary=True)
        base_names = [get_file_name(p).replace("_phi", "") for p in phi_sub_list]
        save_image_with_given_reference(warped_img, [pair[0]] * num_aug, saving_pth,
                                        [n + '_warped' for n in base_names])
        save_image_with_given_reference(warped_label, [pair[0]] * num_aug, saving_pth,
                                        [n + '_label' for n in base_names])
# img_label_txt_pth = "/playpen-raid/zyshen/data/lpba_seg_resize/test/file_path_list.txt"
# phi_pth = "/playpen-raid/zyshen/data/lpba_reg/test_aug/reg/res/records"
# phi_type = '*_phi.nii.gz'
# saving_path = "/playpen-raid/zyshen/data/lpba_seg_resize/warped_img_label"
# --- script configuration (site-specific paths; edit before running) ---
img_label_txt_pth = "/playpen-raid/zyshen/data/oai_seg/test/file_path_list.txt"
phi_pth = "/playpen-raid/zyshen/data/oai_reg/test_aug/reg/res/records"
phi_type = '*_phi.nii.gz'
saving_path = "/playpen-raid/zyshen/data/oai_seg/warped_img_label"
compute_warped_image_label(img_label_txt_pth,phi_pth,phi_type, saving_path)
| 2,617 | 49.346154 | 172 | py |
easyreg | easyreg-master/tools/visual_tools.py |
import matplotlib.pyplot as plt
from easyreg import utils
import SimpleITK as sitk
import torch
import numpy as np
import mermaid.finite_differences as fdt
import mermaid.utils as py_utils
import os
from scipy import misc
def read_png_into_numpy(file_path, name=None, visual=False):
    """Read a png as a flattened (grayscale) array rescaled to [0, 1];
    optionally display it, titled *name* (default 'image')."""
    image = misc.imread(file_path, flatten=True)
    lo, hi = image.min(), image.max()
    image = (image - lo) / (hi - lo)
    if visual:
        title = 'image' if name is None else name
        plot_2d_img(image, title)
    return image
def read_png_into_standard_form(file_path, name=None, visual=False):
    """Read a png and return it in 1x1xHxW form together with the
    normalized spacing 1/(size-1) of the spatial dimensions."""
    image = read_png_into_numpy(file_path, name, visual)
    full_sz = [1, 1] + list(image.shape)
    spacing = 1. / (np.array(full_sz[2:]) - 1)
    return image.reshape(*full_sz), spacing
def save_3D_img_from_numpy(input, file_path, spacing=None, orgin=None, direction=None):
    """Convert a numpy volume into an itk image and write it to *file_path*,
    stamping spacing/origin/direction meta data only when supplied."""
    itk_img = sitk.GetImageFromArray(input)
    for value, setter in ((spacing, itk_img.SetSpacing),
                          (orgin, itk_img.SetOrigin),
                          (direction, itk_img.SetDirection)):
        if value is not None:
            setter(value)
    out_dir = os.path.split(file_path)[0]
    os.makedirs(out_dir, exist_ok=True)
    sitk.WriteImage(itk_img, file_path)
def save_3D_img_from_itk(output, file_path, spacing=None, orgin=None, direction=None):
    """Write an existing itk image *output* to *file_path*, stamping
    spacing/origin/direction meta data only when supplied."""
    for value, setter in ((spacing, output.SetSpacing),
                          (orgin, output.SetOrigin),
                          (direction, output.SetDirection)):
        if value is not None:
            setter(value)
    out_dir = os.path.split(file_path)[0]
    os.makedirs(out_dir, exist_ok=True)
    sitk.WriteImage(output, file_path)
def save_jacobi_map(map, img_sz, fname, output_path, save_neg_jacobi=True):
    """Compute the Jacobian determinant of a transform map, print folding
    statistics, and write the result as <output_path>/<fname>.nii.gz.

    :param map: transform map tensor, B x 3 x (spatial dims); resampled to
        *img_sz* first when its spatial size differs
    :param img_sz: target spatial size the determinant is computed on
    :param fname: output file name (without extension)
    :param output_path: output folder
    :param save_neg_jacobi: if True save the negative-determinant values,
        otherwise the absolute determinant map (see review notes below)
    """
    img_sz = np.array(img_sz)
    map_sz = np.array(map.shape[2:])
    spacing = 1. / (np.array(img_sz) - 1)  # the disp coorindate is [-1,1]
    need_resampling = not all(list(img_sz == map_sz))
    if need_resampling:
        # bring the map onto the image grid before differentiating
        id = py_utils.identity_map_multiN(img_sz, spacing)
        map = py_utils.compute_warped_image_multiNC(map, id, spacing, 1,
                                                    zero_boundary=False)
    map = map.detach().cpu().numpy()
    # central differences of each channel; the determinant is approximated
    # as the product of the three diagonal partial derivatives
    fd = fdt.FD_np(spacing)
    dfx = fd.dXc(map[:, 0, ...])
    dfy = fd.dYc(map[:, 1, ...])
    dfz = fd.dZc(map[:, 2, ...])
    jacobi_det = dfx * dfy * dfz
    # self.temp_save_Jacobi_image(jacobi_det,map)
    jacobi_neg_bool = jacobi_det < 0.
    # NOTE(review): boolean indexing flattens, so jacobi_neg is a 1-D array
    # of the negative values, not a per-batch volume.
    jacobi_neg = jacobi_det[jacobi_neg_bool]
    jacobi_abs = np.abs(jacobi_det)
    jacobi_abs_scalar = - np.sum(jacobi_neg)  #
    jacobi_num_scalar = np.sum(jacobi_neg_bool)
    print("fname:{} folds for each channel {},{},{}".format(fname,np.sum(dfx < 0.), np.sum(dfy < 0.), np.sum(dfz < 0.)))
    print("fname:{} the jacobi_value of fold points is {}".format(fname,jacobi_abs_scalar))
    print("fname:{} the number of fold points is {}".format(fname, jacobi_num_scalar))
    for i in range(jacobi_abs.shape[0]):
        if not save_neg_jacobi:
            jacobi_img = sitk.GetImageFromArray(jacobi_abs[i])
        else:
            # NOTE(review): jacobi_neg[i] is a single scalar (see above), not
            # the i-th batch volume — this branch looks buggy; confirm the
            # intended behavior of save_neg_jacobi=True before relying on it.
            jacobi_img = sitk.GetImageFromArray(jacobi_neg[i])
        # NOTE(review): pth does not depend on i, so later batch entries
        # overwrite earlier ones.
        pth = os.path.join(output_path,fname)+'.nii.gz'
        sitk.WriteImage(jacobi_img, pth)
def save_smoother_map(adaptive_smoother_map, gaussian_stds, t, path=None, weighting_type=None):
    """Plot (or save) the effective smoothing-std map
    sqrt(sum_i w_i * std_i^2) at time *t*; for 3D input the central slice
    along axis 3 is shown."""
    weights = adaptive_smoother_map.detach()
    dim = weights.dim() - 2
    if weighting_type == 'w_K_w':
        # under 'w_K_w' weighting the stored map is squared before use
        weights = weights ** 2
    stds = gaussian_stds.detach().view(1, len(gaussian_stds), *([1] * dim))
    variance_map = torch.sum(weights * (stds ** 2), 1, keepdim=True)
    smoother_map = torch.sqrt(variance_map)
    print(t)
    fname = str(t) + "sm_map"
    if dim == 2:
        plot_2d_img(smoother_map[0, 0], fname, path)
    elif dim == 3:
        y_half = smoother_map.shape[3] // 2
        plot_2d_img(smoother_map[0, 0, :, y_half, :], fname, path)
def save_momentum(momentum, t=None, path=None):
    """Plot (or save) the squared magnitude of a momentum field; for 3D
    input the central slice along axis 3 is shown."""
    dim = momentum.dim() - 2
    magnitude = torch.sum(momentum.detach() ** 2, 1, keepdim=True)
    fname = "momentum"
    if t is not None:
        print(t)
        fname = str(t) + fname
    if dim == 2:
        plot_2d_img(magnitude[0, 0], fname, path)
    elif dim == 3:
        y_half = magnitude.shape[3] // 2
        plot_2d_img(magnitude[0, 0, :, y_half, :], fname, path)
def save_velocity(velocity, t, path=None):
    """Plot (or save) the squared magnitude of a velocity field at time *t*;
    for 3D input the central slice along axis 3 is shown."""
    dim = velocity.dim() - 2
    magnitude = torch.sum(velocity.detach() ** 2, 1, keepdim=True)
    print(t)
    fname = str(t) + "velocity"
    if dim == 2:
        plot_2d_img(magnitude[0, 0], fname, path)
    elif dim == 3:
        y_half = magnitude.shape[3] // 2
        plot_2d_img(magnitude[0, 0, :, y_half, :], fname, path)
def plot_2d_img(img,name,path=None):
    """
    Plot a single 2D image with a colorbar; show interactively when no
    *path* is given, otherwise save to *path*.

    :param img: 2D tensor/array (singleton dims are squeezed away first)
    :param name: figure title
    :param path: saving path; when falsy the figure is shown instead
    :return: None
    """
    sp=111
    # drop singleton dims so e.g. a 1x1xHxW tensor plots as HxW
    img = torch.squeeze(img)
    font = {'size': 10}
    plt.setp(plt.gcf(), 'facecolor', 'white')
    plt.style.use('bmh')
    plt.subplot(sp).set_axis_off()
    plt.imshow(utils.t2np(img))#,vmin=0.0590, vmax=0.0604) #vmin=0.0590, vmax=0.0604
    plt.colorbar().ax.tick_params(labelsize=10)
    plt.title(name, font)
    if not path:
        plt.show()
    else:
        plt.savefig(path, dpi=300)
        plt.clf()
def visualize_jacobi(phi,spacing, img=None, file_path=None, visual=True):
    """
    Compute and report the Jacobian determinant of a transform, and
    optionally visualize (2D) or save (3D) the result.

    :param phi: Bxdimx X xYxZ
    :param spacing: [sx,sy,sz]
    :param img: Bx1xXxYxZ
    :param file_path: saving path
    :return:
    """
    phi_sz = phi.shape
    n_batch = phi_sz[0]
    dim =phi_sz[1]
    phi_np = utils.t2np(phi)
    if img is not None:
        assert phi.shape[0] == img.shape[0]
        img_np = utils.t2np(img)
    # central differences per channel; det(J) approximated by the product
    # of the diagonal partials (dfz stays 1. in the 2D case)
    fd = fdt.FD_np(spacing)
    dfx = fd.dXc(phi_np[:, 0, ...])
    dfy = fd.dYc(phi_np[:, 1, ...])
    dfz =1.
    if dim==3:
        dfz = fd.dZc(phi_np[:, 2, ...])
    jacobi_det = dfx * dfy * dfz
    # mask keeping only the folded (negative-determinant) locations
    jacobi_neg = np.ma.masked_where(jacobi_det>= 0, jacobi_det)
    #jacobi_neg = (jacobi_det<0).astype(np.float32)
    jacobi_abs = - np.sum(jacobi_det[jacobi_det < 0.])  #
    jacobi_num = np.sum(jacobi_det < 0.)
    if dim==3:
        print("print folds for each channel {},{},{}".format(np.sum(dfx < 0.), np.sum(dfy < 0.), np.sum(dfz < 0.)))
    print("the jacobi_value of fold points for current map is {}".format(jacobi_abs))
    print("the number of fold points for current map is {}".format(jacobi_num))
    if visual:
        for i in range(n_batch):
            if dim == 2:
                # NOTE(review): this branch reads img_np, which is only bound
                # when img is not None — 2D visualization without img raises
                # NameError; confirm callers always pass img in 2D.
                sp = 111
                font = {'size': 10}
                plt.setp(plt.gcf(), 'facecolor', 'white')
                plt.style.use('bmh')
                plt.subplot(sp).set_axis_off()
                plt.imshow(utils.t2np(img_np[i,0]))
                plt.imshow(jacobi_neg[i], cmap='gray', alpha=1.)
                plt.colorbar().ax.tick_params(labelsize=10)
                plt.title('img_jaocbi', font)
                if not file_path:
                    plt.show()
                else:
                    plt.savefig(file_path, dpi=300)
                    plt.clf()
            if dim ==3:
                if file_path:
                    # NOTE(review): pth does not depend on i, so batches
                    # overwrite each other in the saved file.
                    jacobi_abs_map = np.abs(jacobi_det)
                    jacobi_img = sitk.GetImageFromArray(jacobi_abs_map[i])
                    pth = os.path.join(file_path)
                    sitk.WriteImage(jacobi_img, pth)
def test():
    """Smoke test: render a random 80x80 image and save it to disk.

    Fixed: the previous call passed ``show=False``, but
    ``plot_2d_img(img, name, path=None)`` accepts no such keyword and the
    call raised a TypeError before anything was drawn.
    """
    import torch
    img = torch.randn(80, 80)
    fname = 'test_program'
    output_path = '/playpen/zyshen/debugs/plot_2d'
    plot_2d_img(img, fname, output_path)
| 7,597 | 31.75 | 121 | py |
easyreg | easyreg-master/tools/print_sh.py | import os
def print_txt(txt, output_path):
    """Write *txt* verbatim to *output_path*, overwriting any existing file."""
    with open(output_path, "w") as out_file:
        out_file.write(txt)
# Training-set sizes and augmentation variants to generate sbatch scripts for.
key_w_list = [10,20,30,40,60,80,100]
key_w_list2= ["1d","atlas",'rand','bspline','aug']
for key_w2 in key_w_list2:
    # one folder of scripts per augmentation variant
    output_path = '/playpen-raid/zyshen/debug/llf_output/par/oai_seg_{}'.format(key_w2)
    os.makedirs(output_path, exist_ok=True)
    # oai_expr_1.sh ... oai_expr_7.sh, one per training-set size
    file_name_list = ["oai_expr_{}.sh".format(i) for i in range(1,len(key_w_list)+1)]
    for i, key_w in enumerate(key_w_list):
        # slurm job: train a segmentation model on the augmented data, then
        # evaluate its best checkpoint on the fixed 10-case test list
        txt = """#!/bin/bash
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=12
#SBATCH --time=4-0
#SBATCH --mem=96G
#SBATCH --output={}case_seg_{}.txt
#SBATCH --partition=volta-gpu
#SBATCH --gres=gpu:1
#SBATCH --qos=gpu_access
source activate torch4
cd /pine/scr/z/y/zyshen/reg_clean/demo/
srun python demo_for_seg_train.py -o /pine/scr/z/y/zyshen/data/oai_seg/baseline/aug/sever/gen_lresol_{} -dtn={}case -tn=seg_par -ts=/pine/scr/z/y/zyshen/reg_clean/debug/settings/oai_seg_par -g=0
srun python demo_for_seg_eval.py -ts=/pine/scr/z/y/zyshen/reg_clean/debug/settings/oai_seg_par -txt=/pine/scr/z/y/zyshen/data/oai_seg/baseline/10case/test/file_path_list.txt -m=/pine/scr/z/y/zyshen/data/oai_seg/baseline/aug/sever/gen_lresol_{}/{}case/seg_par/checkpoints/model_best.pth.tar -o=/pine/scr/z/y/zyshen/data/oai_seg/baseline/aug/sever_res_par/gen_lresol_{}/{}case -g=0
""".format(key_w,key_w2,key_w2,key_w,key_w2,key_w,key_w2,key_w)
        print_txt(txt,os.path.join(output_path,file_name_list[i]))
#
# key_w_list = [10,20,30,40,60,80,100]
# key_w_list2= ["1d"]
#
#
# for key_w2 in key_w_list2:
# output_path = '/playpen-raid/zyshen/debug/llf_output/oai_seg_{}'.format(key_w2)
# os.makedirs(output_path, exist_ok=True)
# file_name_list = ["oai_expr_{}.sh".format(i) for i in range(1,len(key_w_list)+1)]
#
# for i, key_w in enumerate(key_w_list):
# txt = """#!/bin/bash
# #SBATCH -p general
# #SBATCH -N 1
# #SBATCH --mem=8g
# #SBATCH -n 1
# #SBATCH -c 6
# #SBATCH --output=oai_expr_aug_1d_{}.txt
# #SBATCH -t 3-
#
#
# source activate torch4
# cd /pine/scr/z/y/zyshen/reg_clean/mermaid/mermaid_demos
# srun python /pine/scr/z/y/zyshen/reg_clean/mermaid/mermaid_demos/gen_aug_samples.py --txt_path=/pine/scr/z/y/zyshen/data/oai_reg/train_with_{}/momentum_lresol.txt --mermaid_setting_path=/pine/scr/z/y/zyshen/reg_clean/debug/settings/oai_reg/mermaid_nonp_settings.json --output_path=/pine/scr/z/y/zyshen/data/oai_seg/baseline/aug/gen_lresol_1d/{}case
# """.format(key_w,key_w,key_w,key_w)
#
# print_txt(txt,os.path.join(output_path,file_name_list[i]))
# key_w_list = [5,10,15,20,25]
# key_w_list2= ["1d"]
#
#
# for key_w2 in key_w_list2:
# output_path = '/playpen-raid/zyshen/debug/llf_output/lpba_seg_{}'.format(key_w2)
# os.makedirs(output_path, exist_ok=True)
# file_name_list = ["lpba_expr_{}.sh".format(i) for i in range(1,len(key_w_list)+1)]
#
# for i, key_w in enumerate(key_w_list):
# txt = """#!/bin/bash
# #SBATCH -p general
# #SBATCH -N 1
# #SBATCH --mem=8g
# #SBATCH -n 1
# #SBATCH -c 6
# #SBATCH --output=lpba_expr_aug_1d_{}.txt
# #SBATCH -t 3-
#
#
# source activate torch4
# cd /pine/scr/z/y/zyshen/reg_clean/mermaid/mermaid_demos
# srun python /playpen-raid/zyshen/reg_clean/mermaid/mermaid_demos/gen_aug_samples.py --txt_path=/playpen-raid/zyshen/data/lpba_reg/train_with_{}/lpba_ncc_reg1/momentum_lresol.txt --mermaid_setting_path=/playpen-raid/zyshen/reg_clean/debug/settings/opt_lddmm/mermaid_nonp_settings.json --output_path=/playpen-raid/zyshen/data/lpba_seg_resize/baseline/aug/gen_lresol_1d/{}case
#
# """.format(key_w,key_w,key_w)
#
# print_txt(txt,os.path.join(output_path,file_name_list[i]))
# key_w_list = [5,10,15,20,25]
# key_w_list2 = ["1d","atlas"]
#
# for key_w2 in key_w_list2:
# output_path = '/playpen-raid/zyshen/debug/llf_output/lpba_seg_{}'.format(key_w2)
# os.makedirs(output_path, exist_ok=True)
# file_name_list = ["lpba_expr_{}.sh".format(i) for i in range(1, len(key_w_list) + 1)]
#
# for i, key_w in enumerate(key_w_list):
# txt = """#!/bin/bash
# #SBATCH --ntasks=1
# #SBATCH --cpus-per-task=12
# #SBATCH --time=4-0
# #SBATCH --mem=64G
# #SBATCH --output={}case_seg_{}.txt
# #SBATCH --partition=volta-gpu
# #SBATCH --gres=gpu:1
# #SBATCH --qos=gpu_access
#
# source activate torch4
# cd /pine/scr/z/y/zyshen/reg_clean/demo/
# srun python demo_for_seg_train.py -o /pine/scr/z/y/zyshen/data/lpba_seg_resize/baseline/aug/sever/gen_lresol_{} -dtn={}case -tn=seg -ts=/pine/scr/z/y/zyshen/reg_clean/debug/settings/lpba_seg_aug -g=0
# srun python demo_for_seg_eval.py -ts=/pine/scr/z/y/zyshen/reg_clean/debug/settings/lpba_seg_aug -txt=/pine/scr/z/y/zyshen/data/lpba_seg_resize/baseline/10case/test/file_path_list.txt -m=/pine/scr/z/y/zyshen/data/lpba_seg_resize/baseline/aug/sever/gen_lresol_{}/{}case/seg/checkpoints/model_best.pth.tar -o=/pine/scr/z/y/zyshen/data/lpba_seg_resize/baseline/aug/sever_res/gen_lresol_{}/{}case -g=0
# """.format(key_w,key_w2,key_w2,key_w,key_w2,key_w,key_w2,key_w)
#
# print_txt(txt, os.path.join(output_path, file_name_list[i]))
#
#
# #
#
# key_w_list = [5,10,15,20,25]
# key_w_list2 = ["multi_reg"]
#
# for key_w2 in key_w_list2:
# output_path = '/playpen-raid/zyshen/debug/llf_output/lpba_reg_{}'.format(key_w2)
# os.makedirs(output_path, exist_ok=True)
#
# for i, key_w in enumerate(key_w_list):
# for j in range(2):
# file_name = "lpba_expr_{}.sh".format(i*2+j)
# txt = """#!/bin/bash
# #SBATCH --ntasks=1
# #SBATCH --cpus-per-task=12
# #SBATCH --time=6-0
# #SBATCH --mem=24G
# #SBATCH --output={}case_seg_{}.txt
# #SBATCH --partition=volta-gpu
# #SBATCH --gres=gpu:1
# #SBATCH --qos=gpu_access
#
# source activate torch4
# cd /pine/scr/z/y/zyshen/reg_clean/demo/
# python demo_for_easyreg_eval.py -ts=/pine/scr/z/y/zyshen/reg_clean/debug/settings/opt_lddmm_new_sm -txt=/pine/scr/z/y/zyshen/data/lpba_seg_resize/multi_reg/multi_reg_list_{}/p{}.txt -o=/pine/scr/z/y/zyshen/data/lpba_seg_resize/baseline/aug/gen_lresol_multi_reg_trans/{}case_p{} -g=0 &
# python demo_for_easyreg_eval.py -ts=/pine/scr/z/y/zyshen/reg_clean/debug/settings/opt_lddmm_new_sm -txt=/pine/scr/z/y/zyshen/data/lpba_seg_resize/multi_reg/multi_reg_list_{}/p{}.txt -o=/pine/scr/z/y/zyshen/data/lpba_seg_resize/baseline/aug/gen_lresol_multi_reg_trans/{}case_p{} -g=0
# """.format(key_w,key_w2,key_w,j*2,key_w,j*2,key_w,j*2+1,key_w,j*2+1)
#
# print_txt(txt, os.path.join(output_path, file_name))
#
# #
# #
# #
# key_w_list = [0,1,2,3,4]
# key_w_list2 = ["test_aug_opt"]
#
# for key_w2 in key_w_list2:
# output_path = '/playpen-raid/zyshen/debug/llf_output/oai_reg_{}'.format(key_w2)
# os.makedirs(output_path, exist_ok=True)
#
# for i, key_w in enumerate(key_w_list):
# file_name = "oai_expr_{}.sh".format(i)
# txt = """#!/bin/bash
# #SBATCH --ntasks=1
# #SBATCH --cpus-per-task=12
# #SBATCH --time=4-0
# #SBATCH --mem=24G
# #SBATCH --output={}case_reg_{}.txt
# #SBATCH --partition=volta-gpu
# #SBATCH --gres=gpu:1
# #SBATCH --qos=gpu_access
#
# source activate torch4
# cd /pine/scr/z/y/zyshen/reg_clean/demo/
# python demo_for_easyreg_eval.py -ts=/pine/scr/z/y/zyshen/reg_clean/debug/settings/opt_lddmm_oai -txt=/pine/scr/z/y/zyshen/data/oai_reg/train_with_test_aug_40/test/opt/p{}.txt -o=/pine/scr/z/y/zyshen/data/oai_reg/test_aug_opt/p{} -g=0
# """.format(key_w,key_w2,key_w,key_w)
#
# print_txt(txt, os.path.join(output_path, file_name))
#
#
| 7,499 | 37.265306 | 400 | py |
easyreg | easyreg-master/tools/image_rescale.py | import SimpleITK as sitk
from easyreg.reg_data_utils import write_list_into_txt, generate_pair_name
from easyreg.utils import *
import mermaid.utils as py_utils
from mermaid.data_wrapper import MyTensor
def __read_and_clean_itk_info(img_input):
    """Return an itk image whose meta data (spacing/origin/direction) is reset to defaults.

    The image is round-tripped through a numpy array, which deliberately drops all
    physical-space information; callers restore it later from the original image.

    :param img_input: file path of an image or a SimpleITK image object
        (renamed from ``input`` to avoid shadowing the builtin; both call
        sites in this module pass it positionally)
    :return: SimpleITK image with default meta data
    """
    if isinstance(img_input, str):
        img_input = sitk.ReadImage(img_input)
    return sitk.GetImageFromArray(sitk.GetArrayFromImage(img_input))
def resize_input_img_and_save_it_as_tmp(img_input,resize_factor=(1.0,1.0,1.0), is_label=False,keep_physical=True,fname=None,saving_path=None,fixed_sz=None):
    """
    Resample an image either by a per-axis factor or to a fixed output size and
    write the result to disk.

    :param img_input: file path of an image or a SimpleITK image object
    :param resize_factor: per-axis scale factor, output_size/input_size, in itk (x,y,z) order;
        ignored when fixed_sz is given
    :param is_label: if True use nearest-neighbor interpolation (preserves label values),
        otherwise BSpline interpolation
    :param keep_physical: if True copy (rescaled) spacing, origin and direction from the input image
    :param fname: output file name; if given, the file is written to saving_path/fname,
        otherwise saving_path itself is treated as the full output path
    :param saving_path: output directory (when fname is set) or full output file path
    :param fixed_sz: desired output size given in numpy (z,y,x) order; overrides resize_factor
    :return: path of the written file
    """
    if isinstance(img_input, str):
        img_org = sitk.ReadImage(img_input)
        img = __read_and_clean_itk_info(img_input)
    else:
        img_org = img_input
        img =__read_and_clean_itk_info(img_input)
    # resampler= sitk.ResampleImageFilter()
    # resampler.SetSize(img.GetSize())
    # bspline = sitk.BSplineTransformInitializer(img, (5, 5, 5), 2)
    # resampler.SetTransform(bspline)
    # img = resampler.Execute(img)
    dimension =3
    img_sz = img.GetSize()
    if not fixed_sz:
        # factor comes in itk order; flip to match img.GetSize() indexing below
        factor = np.flipud(resize_factor)
        after_size = [round(img_sz[i] * factor[i]) for i in range(dimension)]
        # spacing scales with (N-1) intervals, not N samples
        spacing_factor = [(after_size[i]-1)/(img_sz[i]-1) for i in range(len(img_sz))]
    else:
        # fixed_sz is in numpy order; flip into itk order
        fixed_sz = np.flipud(fixed_sz)
        factor = [fixed_sz[i]/img_sz[i] for i in range(len(img_sz))]
        spacing_factor = [(fixed_sz[i]-1)/(img_sz[i]-1) for i in range(len(img_sz))]
    # skip resampling entirely when every factor is exactly 1
    resize = not all([f == 1 for f in factor])
    if resize:
        resampler = sitk.ResampleImageFilter()
        affine = sitk.AffineTransform(dimension)
        matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
        after_size = [round(img_sz[i] * factor[i]) for i in range(dimension)]
        after_size = [int(sz) for sz in after_size]
        if fixed_sz is not None:
            # sanity check: the rounded size must reproduce the requested fixed size
            for i in range(len(fixed_sz)):
                assert fixed_sz[i]==after_size[i]
        # itk resampling maps output to input, hence the inverse scale on the diagonal
        matrix[0, 0] = 1. / spacing_factor[0]
        matrix[1, 1] = 1. / spacing_factor[1]
        matrix[2, 2] = 1. / spacing_factor[2]
        affine.SetMatrix(matrix.ravel())
        resampler.SetSize(after_size)
        resampler.SetTransform(affine)
        if is_label:
            resampler.SetInterpolator(sitk.sitkNearestNeighbor)
        else:
            resampler.SetInterpolator(sitk.sitkBSpline)
        img_resampled = resampler.Execute(img)
    else:
        img_resampled = img
    if fname is not None:
        os.makedirs(saving_path, exist_ok=True)
        fpth = os.path.join(saving_path, fname)
    else:
        os.makedirs(os.path.split(saving_path)[0], exist_ok=True)
        fpth = saving_path
    if keep_physical:
        # restore physical meta data from the original image, with spacing rescaled to the new grid
        img_resampled.SetSpacing(resize_spacing(img_sz, img_org.GetSpacing(), factor))
        img_resampled.SetOrigin(img_org.GetOrigin())
        img_resampled.SetDirection(img_org.GetDirection())
    sitk.WriteImage(img_resampled, fpth)
    return fpth
def resample_warped_phi_and_image(source_path,target_path, l_source_path, l_target_path, phi,spacing):
    """
    Resample the transformation map *phi* to the target image resolution and warp
    the source image and/or the source label with the resampled map.

    :param source_path: path of the source (moving) image, or None to skip image warping
    :param target_path: path of the target image; its shape defines the output grid
    :param l_source_path: path of the source label image, or None to skip label warping
    :param l_target_path: path of the target label image; its shape defines the label output grid
    :param phi: transformation map tensor (device is reused for the loaded images);
        presumably B x 3 x X x Y x Z in mermaid's normalized coordinates -- TODO confirm
    :param spacing: spacing associated with *phi*, passed through to resample_image
    :return: tuple (new_phi, warped, l_warped, new_spacing); entries stay None when
        the corresponding input path was None
    """
    new_phi = None
    warped = None
    l_warped = None
    new_spacing = None
    if source_path is not None:
        s = sitk.GetArrayFromImage(sitk.ReadImage(source_path)).astype(np.float32)
        t = sitk.GetArrayFromImage(sitk.ReadImage(target_path)).astype(np.float32)
        sz_t = [1, 1] + list(t.shape)
        source = torch.from_numpy(s[None][None]).to(phi.device)
        # resample phi onto the target grid, then warp the image with order-1 (linear) interpolation
        new_phi, new_spacing = resample_image(phi, spacing, sz_t, 1, zero_boundary=True)
        warped = py_utils.compute_warped_image_multiNC(source, new_phi, new_spacing, 1, zero_boundary=True)
    if l_source_path is not None:
        ls = sitk.GetArrayFromImage(sitk.ReadImage(l_source_path)).astype(np.float32)
        lt = sitk.GetArrayFromImage(sitk.ReadImage(l_target_path)).astype(np.float32)
        sz_lt = [1, 1] + list(lt.shape)
        l_source = torch.from_numpy(ls[None][None]).to(phi.device)
        # reuse the map computed above when available; otherwise build it from the label grid
        if new_phi is None:
            new_phi, new_spacing = resample_image(phi, spacing, sz_lt, 1, zero_boundary=True)
        # labels are warped with order-0 interpolation so label values are preserved
        l_warped = py_utils.compute_warped_image_multiNC(l_source, new_phi, new_spacing, 0, zero_boundary=True)
    return new_phi, warped,l_warped, new_spacing
#
# def resample_warped_phi_and_image(source_path,target_path, l_source_path, l_target_path, phi,spacing):
# new_phi = None
# warped = None
# l_warped = None
# new_spacing = None
# if source_path is not None:
# s = sitk.GetArrayFromImage(sitk.ReadImage(source_path)).astype(np.float32)
# #t = sitk.GetArrayFromImage(sitk.ReadImage(target_path)).astype(np.float32)
# sz_t = [1, 1] + list(s.shape)
# source = torch.from_numpy(s[None][None]).to(phi.device)
# new_phi, new_spacing = resample_image(phi, spacing, sz_t, 1, zero_boundary=True)
# warped = py_utils.compute_warped_image_multiNC(source, new_phi, new_spacing, 1, zero_boundary=True)
#
#
# if l_source_path is not None:
# ls = sitk.GetArrayFromImage(sitk.ReadImage(l_source_path)).astype(np.float32)
# #lt = sitk.GetArrayFromImage(sitk.ReadImage(l_target_path)).astype(np.float32)
# sz_lt = [1, 1] + list(ls.shape)
# l_source = torch.from_numpy(ls[None][None]).to(phi.device)
# if new_phi is None:
# new_phi, new_spacing = resample_image(phi, spacing, sz_lt, 1, zero_boundary=True)
# l_warped = py_utils.compute_warped_image_multiNC(l_source, new_phi, new_spacing, 0, zero_boundary=True)
#
# return new_phi, warped,l_warped, new_spacing
def save_transform_with_reference(transform, spacing,moving_reference_list, target_reference_list, path=None, fname_list=None,save_disp_into_itk_format=True):
    """Save a batch of transforms, either in itk physical-space format or as raw maps.

    :param transform: batched transformation maps
    :param spacing: spacing associated with the maps
    :param moving_reference_list: reference moving images (used only for the itk format)
    :param target_reference_list: reference target images (used only for the itk format)
    :param path: output directory
    :param fname_list: output file name(s)
    :param save_disp_into_itk_format: True -> itk .tfm files, False -> nifti/npy maps
    """
    if save_disp_into_itk_format:
        save_transform_itk(transform, spacing, moving_reference_list, target_reference_list, path, fname_list)
    else:
        save_transfrom(transform, spacing, path, fname_list)
def save_transform_itk(transform,spacing,moving_list,target_list, path, fname_list):
    """
    Convert each transform in a batch into an itk-convention physical-space
    displacement field and save it as a BSpline transform (.tfm) file.

    :param transform: batched transformation maps (B x 3 x X x Y x Z); torch tensor or numpy array
    :param spacing: spacing associated with *transform*
    :param moving_list: list of moving image paths providing spacing/direction/origin references
    :param target_list: list of target image paths providing spacing/direction/origin references
    :param path: output directory
    :param fname_list: single file-name string (a per-batch prefix is prepended) or a list of names
    """
    from mermaid.utils import identity_map
    if type(transform) == torch.Tensor:
        transform = transform.detach().cpu().numpy()
    for i in range(transform.shape[0]):
        cur_trans = transform[i]
        img_sz = np.array(transform.shape[2:])
        # physical meta data of the moving and target reference images
        moving_ref = sitk.ReadImage(moving_list[i])
        moving_spacing_ref = moving_ref.GetSpacing()
        moving_direc_ref = moving_ref.GetDirection()
        moving_orig_ref = moving_ref.GetOrigin()
        target_ref = sitk.ReadImage(target_list[i])
        target_spacing_ref = target_ref.GetSpacing()
        target_direc_ref = target_ref.GetDirection()
        target_orig_ref = target_ref.GetOrigin()
        # identity maps in each image's physical spacing (itk spacing flipped into numpy order)
        id_np_moving = identity_map(img_sz, np.flipud(moving_spacing_ref))
        id_np_target = identity_map(img_sz, np.flipud(target_spacing_ref))
        # rescale the map from its own spacing into the moving image's physical spacing
        factor = np.flipud(moving_spacing_ref) / spacing
        factor = factor.reshape(3,1,1,1)
        moving_direc_matrix = np.array(moving_direc_ref).reshape(3, 3)
        target_direc_matrix = np.array(target_direc_ref).reshape(3, 3)
        # displacement = direction-corrected warped moving position minus target identity position;
        # permute_trans flips the channel axis from numpy (z,y,x) into itk (x,y,z) order
        cur_trans = np.matmul(moving_direc_matrix, permute_trans(id_np_moving + cur_trans * factor).reshape(3, -1)) \
                    - np.matmul(target_direc_matrix, permute_trans(id_np_target).reshape(3, -1))
        cur_trans = cur_trans.reshape(id_np_moving.shape)
        fn = '{}_batch_'.format(i) + fname_list if not type(fname_list) == list else fname_list[i]
        saving_path = os.path.join(path, fn + '.h5')
        # account for the origin offset between target and moving image
        bias = np.array(target_orig_ref)-np.array(moving_orig_ref)
        bias = -bias.reshape(3,1,1,1)
        transform_physic = cur_trans +bias
        trans = get_transform_with_itk_format(transform_physic,target_spacing_ref, target_orig_ref,target_direc_ref)
        #sitk.WriteTransform(trans, saving_path)
        # Retrive the DField from the Transform
        dfield = trans.GetDisplacementField()
        # Fitting a BSpline from the Deformation Field
        bstx = dfield2bspline(dfield, verbose=True)
        # Save the BSpline Transform
        sitk.WriteTransform(bstx, saving_path.replace('.h5', '.tfm'))
def permute_trans(trans):
    """Return a copy of *trans* with the channel (first) axis reversed.

    Swaps channels 0 and 2 while keeping channel 1 in place, i.e. converts a
    3-channel map between (z, y, x) and (x, y, z) channel ordering. The input
    array is left untouched.
    """
    flipped = np.empty_like(trans)
    for src, dst in ((0, 2), (1, 1), (2, 0)):
        flipped[dst, ...] = trans[src, ...]
    return flipped
def save_transfrom(transform,spacing, path=None, fname=None,using_affine=False):
    """
    Save a batch of non-parametric maps as nifti files, or a batch of affine
    parameters as .npy files.  (NOTE(review): the misspelled name 'save_transfrom'
    is kept because external callers reference it.)

    :param transform: B x 3 x X x Y x Z map (torch tensor or numpy array), or the
        affine parameters when using_affine is True
    :param spacing: spacing used to normalize the map into [0, 1] coordinates
    :param path: output directory
    :param fname: single file-name string (a per-batch prefix is prepended) or a list of names
    :param using_affine: if True, save affine parameters instead of a map
    """
    if not using_affine:
        if type(transform) == torch.Tensor:
            transform = transform.detach().cpu().numpy()
        img_sz = np.array(transform.shape[2:])
        # mapping into 0, 1 coordinate
        # NOTE(review): when a numpy array is passed in, this division mutates the
        # caller's array in place -- confirm callers do not reuse it afterwards
        for i in range(len(img_sz)):
            transform[:, i, ...] = transform[:, i, ...] / ((img_sz[i] - 1) * spacing[i])
        import nibabel as nib
        for i in range(transform.shape[0]):
            # identity affine: the normalized map carries no physical meta data
            phi = nib.Nifti1Image(transform[i], np.eye(4))
            fn = '{}_batch_'.format(i)+fname if not type(fname)==list else fname[i]
            nib.save(phi, os.path.join(path, fn+'_phi.nii.gz'))
    else:
        affine_param = transform
        if isinstance(affine_param, list):
            affine_param =affine_param[0]
        affine_param = affine_param.detach().cpu().numpy()
        for i in range(affine_param.shape[0]): # todo the bias part of the affine param should also normalized into non-physical space [0,1]
            fn = '{}_batch_'.format(i)+fname if not type(fname)==list else fname[i]
            np.save(os.path.join(path, fn + '_affine.npy'), affine_param[i])
def save_image_with_given_reference(img=None,reference_list=None,path=None,fname=None):
    """
    Write each batch entry of *img* as a nii.gz file, copying spacing, direction
    and origin from the corresponding reference image.  When *img* is None the
    reference image itself is written instead.

    :param img: Nx1xDxHxW array/tensor, or None
    :param reference_list: list of N reference image paths
    :param path: output directory
    :param fname: single file-name string (a per-batch prefix is prepended) or a list of names
    :return: None
    """
    count = len(fname) if fname is not None else 0
    os.makedirs(path, exist_ok=True)
    for idx in range(count):
        ref_img = sitk.ReadImage(reference_list[idx])
        if img is None:
            # nothing to convert: re-save the reference image as-is
            out_img = ref_img
        else:
            if type(img) == torch.Tensor:
                img = img.detach().cpu().numpy()
            out_img = sitk.GetImageFromArray(img[idx, 0])
            # carry the reference's physical meta data over to the new image
            out_img.SetSpacing(ref_img.GetSpacing())
            out_img.SetDirection(ref_img.GetDirection())
            out_img.SetOrigin(ref_img.GetOrigin())
        name = fname[idx] if type(fname) == list else '{}_batch_'.format(idx) + fname
        sitk.WriteImage(out_img, os.path.join(path, name + '.nii.gz'))
def init_env(output_path, source_path_list, target_path_list, l_source_path_list=None, l_target_path_list=None):
    """
    Set up the folder layout and the pair/name list files for a registration task.

    Creates <output_path>/reg/test and <output_path>/reg/res, then writes the
    pair path list and the derived pair names into text files under reg/test.

    (Fixes: the previous docstring documented non-existent parameters; the dead
    locals 'data_task_name'/'cur_task_name' were removed.)

    :param output_path: root folder of the task
    :param source_path_list: list of source image paths
    :param target_path_list: list of target image paths (same length as source_path_list)
    :param l_source_path_list: optional list of source label paths
    :param l_target_path_list: optional list of target label paths
    :return: the task root path (identical to output_path)
    """
    file_num = len(source_path_list)
    assert len(source_path_list) == len(target_path_list)
    if l_source_path_list is not None and l_target_path_list is not None:
        assert len(source_path_list) == len(l_source_path_list)
        file_list = [[source_path_list[i], target_path_list[i], l_source_path_list[i], l_target_path_list[i]]
                     for i in range(file_num)]
    else:
        file_list = [[source_path_list[i], target_path_list[i]] for i in range(file_num)]
    os.makedirs(os.path.join(output_path, 'reg/test'), exist_ok=True)
    os.makedirs(os.path.join(output_path, 'reg/res'), exist_ok=True)
    pair_txt_path = os.path.join(output_path, 'reg/test/pair_path_list.txt')
    fn_txt_path = os.path.join(output_path, 'reg/test/pair_name_list.txt')
    # pair names are derived from the (source, target) file names
    fname_list = [generate_pair_name([file_list[i][0], file_list[i][1]]) for i in range(file_num)]
    write_list_into_txt(pair_txt_path, file_list)
    write_list_into_txt(fn_txt_path, fname_list)
    return output_path
# debug_path = '/playpen/zyshen/data_pre/down_sampled_training_for_intra/'
#
# img_list_txt_path = '/playpen/zyshen/debugs/get_val_and_debug_res/debug.txt'
# output_path = '/playpen/zyshen/debugs/zhengyang'
# source_path_list, target_path_list, l_source_path_list, l_target_path_list = loading_img_list_from_files(
# img_list_txt_path)
# init_env(output_path,source_path_list,target_path_list,l_source_path_list,l_target_path_list)
# path = '/playpen/zyshen/debugs/dct/OAS30006_MR_d0166_brain_origin.nii.gz'
# saving_path='/playpen/zyshen/debugs/dct'
# resize_input_img_and_save_it_as_tmp(path,is_label=False,saving_path=saving_path,fname='bspline_test.nii.gz')
| 13,185 | 42.375 | 158 | py |
easyreg | easyreg-master/data_pre/fileio.py | """
Helper functions to take care of all the file IO
"""
import itk
import os
import nrrd
import torch
import data_pre.image_manipulations as IM
import numpy as np
from abc import ABCMeta, abstractmethod
class FileIO(object):
    """
    Abstract base class for file i/o.

    NOTE(review): the Python-2 style ``__metaclass__`` attribute below has no
    effect under Python 3, so the class is not actually abstract at runtime;
    it is left unchanged to avoid altering instantiation behavior.
    """
    __metaclass__ = ABCMeta

    def __init__(self):
        """
        Constructor
        """

    def _is_nrrd_filename(self, filename):
        """Return True if *filename* has a nrrd extension ('.nrrd' or '.nhdr', case-insensitive)."""
        sf = os.path.splitext(filename)
        ext = sf[1].lower()
        if ext == '.nrrd':
            return True
        elif ext == '.nhdr':
            return True
        else:
            return False

    def _convert_itk_vector_to_numpy(self, v):
        """Convert an itk vector into a 1-D numpy array."""
        return itk.GetArrayFromVnlVector(v.Get_vnl_vector())

    def _convert_itk_matrix_to_numpy(self, M):
        """Convert an itk matrix into a 2-D numpy array."""
        return itk.GetArrayFromVnlMatrix(M.GetVnlMatrix().as_matrix())

    @staticmethod
    def t2np(v):
        """
        Takes a torch array and returns it as a numpy array on the cpu

        Bug fix: this was previously defined without ``self`` while being
        invoked as ``self.t2np(data)``, which always raised a TypeError.  It
        is now a staticmethod so both ``self.t2np(v)`` and ``FileIO.t2np(v)``
        work.

        :param v: torch array
        :return: numpy array
        """
        return (v.data).cpu().numpy()

    def _convert_data_to_numpy_if_needed(self, data):
        """
        Return *data* as a numpy array when it is a torch tensor (any dtype or
        device); otherwise return it unchanged.

        Bug fix: the previous implementation compared ``type(data)`` against a
        long list of legacy tensor classes (torch.FloatTensor, torch.cuda.*,
        Variable, Parameter, ...), which does not match modern torch.Tensor
        instances; ``isinstance`` covers all of them, since Parameter and the
        legacy Variable are Tensor subclasses.
        """
        if isinstance(data, torch.Tensor):
            return self.t2np(data)
        else:
            return data

    @abstractmethod
    def read(self, filename):
        """
        Abstract method to read a file

        :param filename: file to be read
        :return: Will return the read file and its header information (as a tuple; im,hdr)
        """
        pass

    @abstractmethod
    def write(self, filename, data, hdr):
        """
        Abstract method to write a file

        :param filename: filename to write the data to
        :param data: data array that should be written (will be converted to numpy on the fly if necessary)
        :param hdr: hdr information for the file (in form of a dictionary)
        :return: n/a
        """
        pass
class ImageIO(FileIO):
    """
    Class to read images

    Supports nrrd files via the dedicated nrrd reader and other formats via
    itk.  Optional post-processing on load: intensity normalization, squeezing
    of singleton dimensions, and adaptive padding/shrinking to a size that is a
    multiple of a given number.
    """

    def __init__(self):
        super(ImageIO, self).__init__()
        self.intensity_normalize_image = False
        """Intensity normalizes an image after reading (default: False)"""
        self.squeeze_image = False
        """squeezes the image when reading; e.g., dimension 1x256x256 becomes 256x256"""
        self.adaptive_padding = -1
        """ padding the img to favorable size default img.shape%adaptive_padding = 0"""

    def turn_intensity_normalization_on(self):
        """
        Turns on intensity normalization on when loading an image
        """
        self.intensity_normalize_image = True

    def turn_intensity_normalization_off(self):
        """
        Turns on intensity normalization off when loading an image
        """
        self.intensity_normalize_image = False

    def set_intensity_normalization(self, int_norm):
        """
        Set if intensity normalization should be on (True) or off (False)

        :param int_norm: image intensity normalization on (True) or off (False)
        """
        self.intensity_normalize_image = int_norm

    def set_adaptive_padding(self, adaptive_padding):
        """
        if adaptive_padding != -1 adaptive padding on
        padding the img to favorable size e.g img.shape%adaptive_padding = 0
        padding size should be bigger than 3, to avoid confused with channel

        :param adaptive_padding: padding multiple (-1 disables; otherwise must be > 3)
        :return: n/a
        """
        # values 0..3 could be mistaken for a channel dimension, so they are rejected
        if adaptive_padding<4 and adaptive_padding != -1:
            raise ValueError("may confused with channel, adaptive padding must bigger than 4")
        self.adaptive_padding = adaptive_padding

    def get_intensity_normalization(self):
        """
        Returns if image will be intensity normalized when loading

        :return: Returns True if image will be intensity normalized when loading
        """
        return self.intensity_normalize_image

    def turn_squeeze_image_on(self):
        """
        Squeezes the image when loading
        """
        self.squeeze_image = True

    def turn_squeeze_image_off(self):
        """
        Does not squeeze image when loading
        """
        self.squeeze_image = False

    def set_squeeze_image(self, squeeze_im):
        """
        Set if image should be squeezed (True) or not (False)

        :param squeeze_im: squeeze image on (True) or off (False)
        """
        self.squeeze_image = squeeze_im

    def get_squeeze_image(self):
        """
        Returns if image will be squeezed when loading

        :return: Returns True if image will be squeezed when loading
        """
        return self.squeeze_image

    def _compute_squeezed_spacing(self,spacing0, dim0, sz0, dimSqueezed):
        """
        Extracts the spacing information for non-trivial dimensions (i.e., with more than one entry)

        :param spacing0: original spacing information
        :param dim0: original dimension
        :param sz0: original size
        :param dimSqueezed: dimension after squeezing
        :return: returns only the spacing information for the dimensions with more than one entry
        """
        spacing = np.zeros(dimSqueezed)
        j = 0
        for i in range(dim0):
            if sz0[i] != 1:
                spacing[j] = spacing0[i]
                j += 1
        return spacing

    def _transform_image_to_NC_image_format(self, I):
        '''
        Takes an input image and returns it in the format which is typical for torch.
        I.e., two dimensions are added (the first one for number of images and the second for the
        number of channels). As were are dealing with individual single-channel intensity images here, these
        dimensions will be 1x1

        :param I: input image of size, sz
        :return: input image, reshaped to size [1,1] + sz
        '''
        return I.reshape([1, 1] + list(I.shape))

    def _try_fixing_image_dimension(self, im, map):
        """
        Try to reshape *im* so its dimensionality matches that of *map*
        (e.g. turn 128x128x1 into 1x1x128x128); returns None when no fix is found.

        :param im: image array
        :param map: map array whose shape defines the expected layout (dim = map.shape[1])
        :return: reshaped image or None
        """
        im_fixed = None # default, if we cannot fix it
        # try to detect cases such as 128x128x1 and convert them to 1x1x128x128
        si = im.shape
        sm = map.shape
        # get the image dimension from the map (always has to be the second entry)
        dim = sm[1]
        if len(si) != len(sm):
            # most likely we are not dealing with a batch of images and have a dimension that needs to be removed
            im_s = im.squeeze()
            dim_s = len(im_s.shape)
            if dim_s == dim:
                # we can likely fix it, because this is an individual image
                im_fixed = self._transform_image_to_NC_image_format(im_s)
                print('Attempted to fix image dimensions for compatibility with map.')
                print('Modified shape from ' + str(si) + ' to ' + str(im_fixed.shape))
        return im_fixed

    def _map_is_compatible_with_image(self,im, map):
        """
        Return True when image and map agree in rank, batch size and all
        spatial dimensions (entry 1 -- the channel/dim axis -- is not compared).
        """
        si = im.shape
        sm = map.shape
        if len(si) != len(sm):
            return False
        else:
            if si[0] != sm[0]:
                return False
            else:
                for i in range(2, len(si)):
                    if si[i] != sm[i]:
                        return False
        return True

    def _convert_itk_image_to_numpy(self,I0_itk):
        """
        Convert an itk image into a numpy array plus a nrrd-style meta-data
        dictionary (origin, spacing, direction, sizes, dimension, space).
        """
        I0 = itk.GetArrayViewFromImage(I0_itk)
        image_meta_data = dict()
        image_meta_data['space origin'] = self._convert_itk_vector_to_numpy(I0_itk.GetOrigin())
        image_meta_data['spacing'] = self._convert_itk_vector_to_numpy(I0_itk.GetSpacing())
        image_meta_data['space directions'] = self._convert_itk_matrix_to_numpy(I0_itk.GetDirection())
        image_meta_data['sizes'] = I0.shape
        image_meta_data['dimension'] = I0_itk.GetImageDimension()
        image_meta_data['space'] = 'left-posterior-superior'
        """
        NRRD format
        {u'dimension': 3,
         u'encoding': 'gzip',
         u'endian': 'little',
         u'keyvaluepairs': {},
         u'kinds': ['domain', 'domain', 'domain'],
         u'sizes': [128, 128, 1],
         u'space': 'left-posterior-superior',
         u'space directions': [['2', '0', '0'], ['0', '2', '0'], ['0', '0', '2']],
         u'space origin': ['0', '0', '0'],
         u'type': 'float'}
        """
        return I0, image_meta_data

    def _do_adaptive_padding(self, im):
        """
        padding the img to favored size, (divided by certain number, here is 4), here using default 4 , favored by cuda fft

        :param im: image array
        :return: edge-padded image whose large dimensions are multiples of self.adaptive_padding
        """
        im_sz = list(im.shape)
        dim = len(im_sz)
        # only pad dimensions that are not already multiples and are larger than 3 (channel guard)
        dim_to_pad = [dim_sz%self.adaptive_padding!=0 and dim_sz>3 for dim_sz in im_sz]
        dim_rem = [dim_sz//self.adaptive_padding for dim_sz in im_sz]
        new_dim_sz = [(dim_rem[i]+1)*self.adaptive_padding if dim_to_pad[i] else im_sz[i] for i in range(dim)]
        # split the extra size (roughly) evenly before and after each dimension
        before_id = [(new_dim_sz[i] -im_sz[i]+1)//2 for i in range(dim)]
        after_id = [new_dim_sz[i] - im_sz[i] - before_id[i] for i in range(dim)]
        padding_loc = tuple([(before_id[i],after_id[i]) for i in range(dim)])
        new_img = np.lib.pad(im, padding_loc, 'edge')
        return new_img

    def _do_adaptive_shrinking(self, im):
        """
        Inverse of _do_adaptive_padding: crop the image so that its large
        dimensions become multiples of self.adaptive_padding.

        :param im: image array (3D crop indices are used below)
        :return: center-cropped image
        """
        im_sz = list(im.shape)
        dim = len(im_sz)
        dim_to_pad = [dim_sz%self.adaptive_padding!=0 and dim_sz>3 for dim_sz in im_sz]
        dim_rem = [dim_sz//self.adaptive_padding for dim_sz in im_sz]
        new_dim_sz = [(dim_rem[i])*self.adaptive_padding if dim_to_pad[i] else im_sz[i] for i in range(dim)]
        before_id = [(-new_dim_sz[i] +im_sz[i]+1)//2 for i in range(dim)]
        after_id = [new_dim_sz[i] + before_id[i] for i in range(dim)]
        new_img = im[before_id[0]:after_id[0],before_id[1]:after_id[1],before_id[2]:after_id[2]].copy()
        return new_img

    def read(self, filename, intensity_normalize=False, squeeze_image=False, adaptive_padding=-1,inverse=False, verbose=False):
        """
        Reads the image assuming and converts it to NxCxXxYxC format if needed

        :param filename: filename to be read
        :param intensity_normalize: uses image intensity normalization
        :param squeeze_image: squeezes image first (e.g, from 1x128x128 to 128x128)
        :param adaptive_padding: pad (or, with inverse=True, crop) to a multiple of this value; -1 disables
        :param inverse: if True shrink instead of pad when adaptive_padding is active
        :param verbose: print progress information
        :return: Will return the read file, its header information, the spacing, and the normalized spacing \
            (as a tuple: im,hdr,spacing,normalized_spacing)
        """
        self.set_intensity_normalization(intensity_normalize)
        self.set_squeeze_image(squeeze_image)
        self.set_adaptive_padding(adaptive_padding)
        if verbose:
            print('Reading image: ' + filename)
        if self._is_nrrd_filename(filename):
            # load with the dedicated nrrd reader (can also read higher dimensional files)
            im, hdr = nrrd.read(filename)
        else:
            # read with the itk reader (can also read other file formats)
            im_itk = itk.imread(filename)
            im, hdr = self._convert_itk_image_to_numpy(im_itk)
        if 'spacing' not in hdr:
            # fall back to unit spacing when the file carries no spacing information
            print('Image does not seem to have spacing information.')
            if 'sizes' in hdr:
                dim_guess = len( hdr['sizes'] )
            else:
                dim_guess = len( im.shape )
            print('Guessed dimension to be dim = ' + str( dim_guess ))
            spacing = np.ones( dim_guess )
            hdr['spacing'] = spacing
            print('Using guessed spacing of ' + str(spacing))
        spacing = hdr['spacing']
        normalized_spacing = spacing # will be changed if image is squeezed
        if self.squeeze_image==True:
            if verbose:
                print('Squeezing image')
            dim = len(im.shape)
            sz = im.shape
            im = im.squeeze()
            dimSqueezed = len(im.shape)
            sz_squeezed = im.shape
            if dim!=dimSqueezed:
                if verbose:
                    print('Squeezing changed dimension from ' + str(dim) + ' -> ' + str(dimSqueezed))
            squeezed_spacing = self._compute_squeezed_spacing(spacing,dim,sz,dimSqueezed)
            if verbose:
                print( 'squeezed_spacing = ' + str(squeezed_spacing))
            # normalize spacing by the number of intervals per dimension
            normalized_spacing = squeezed_spacing / (np.array(sz_squeezed) - 1)
            if verbose:
                print('Normalized spacing = ' + str(normalized_spacing))
        if adaptive_padding>0:
            if not inverse:
                im = self._do_adaptive_padding(im)
            else:
                im = self._do_adaptive_shrinking(im)
        if self.intensity_normalize_image==True:
            im = IM.IntensityNormalizeImage().defaultIntensityNormalization(im)
        else:
            if verbose:
                print('WARNING: Image was NOT intensity normalized when loading.')
        return im,hdr,spacing,normalized_spacing

    def read_to_nc_format(self,filename,intensity_normalize=False,squeeze_image=False ):
        """
        Reads the image assuming it is single channel and of XxYxZ format and convert it to NxCxXxYxC format

        :param filename: filename to be read
        :param intensity_normalize: uses image intensity normalization
        :param squeeze_image: squeezes image first (e.g, from 1x128x128 to 128x128)
        :return: Will return the read file, its header information, the spacing, and the normalized spacing \
            (as a tuple: im,hdr,spacing,normalized_spacing)
        """
        im,hdr,spacing,normalized_spacing = self.read(filename,intensity_normalize,squeeze_image)
        im = self._transform_image_to_NC_image_format(im)
        return im,hdr,spacing,normalized_spacing

    def read_to_map_compatible_format(self,filename,map,intensity_normalize=False,squeeze_image=False):
        """
        Reads the image and makes sure it is compatiblle with the map. If it is not it tries to fix the format.

        :param filename: filename to be read
        :param map: map which is used to determine the format
        :param intensity_normalize: uses image intensity normalization
        :param squeeze_image: squeezes image first (e.g, from 1x128x128 to 128x128)
        :return: Will return the read file, its header information, the spacing, and the normalized spacing \
            (as a tuple: im,hdr,spacing,normalized_spacing)
        """
        if map is None:
            print('Map needs to be specified. Currently set to None. Aborting.')
            return None,None,None,None
        im,hdr,spacing,normalized_spacing = self.read(filename,intensity_normalize,squeeze_image)
        if not self._map_is_compatible_with_image(im, map):
            im_fixed = self._try_fixing_image_dimension(im, map)
            if im_fixed is None:
                print('Cannot apply map to image due to dimension mismatch')
                print('Attempt at automatically fixing dimensions failed')
                print('Image dimension:')
                print(im.shape)
                print('Map dimension:')
                print(map.shape)
                return None,None,None,None
            else:
                im = im_fixed
        return im,hdr,spacing,normalized_spacing

    def write(self, filename, data, hdr):
        """
        Write *data* (converted to numpy if necessary) with header *hdr* to a
        nrrd file; other output formats are rejected.
        """
        if not self._is_nrrd_filename(filename):
            print('Sorry, currently only nrrd files are supported as output. Aborting.')
            return
        # now write it out
        print('Writing image: ' + filename)
        nrrd.write(filename, self._convert_data_to_numpy_if_needed( data ), hdr)
class GenericIO(FileIO):
    """Generic nrrd-based reader/writer for arbitrary array data."""

    def __init__(self):
        super(GenericIO, self).__init__()

    def read(self, filename):
        """Read a nrrd file and return (data, header); other formats yield (None, None)."""
        if self._is_nrrd_filename(filename):
            print('Reading: ' + filename)
            data, data_hdr = nrrd.read(filename)
            return data, data_hdr
        print('Sorry, currently only nrrd files are supported when reading. Aborting.')
        return None, None

    def write(self, filename, data, hdr):
        """Write *data* (converted to numpy if necessary) with header *hdr* to a nrrd file."""
        if not self._is_nrrd_filename(filename):
            print('Sorry, currently only nrrd files are supported when writing. Aborting.')
            return
        print('Writing: ' + filename)
        nrrd.write(filename, self._convert_data_to_numpy_if_needed(data), hdr)
class MapIO(GenericIO):
    """
    I/O for map files in nrrd format; all read/write behavior is inherited
    unchanged from GenericIO.
    """
    def __init__(self):
        super(MapIO, self).__init__()
| 17,393 | 36.568035 | 127 | py |
easyreg | easyreg-master/data_pre/partition_multi_channel.py | import numpy as np
import SimpleITK as sitk
def partition_multi(option_p, patch_size,overlap_size, mode=None, img_sz=(-1,-1,-1), flicker_on=False, flicker_mode='rand'):
    """Factory for Partition_Multi, reading defaults from the parameter object.

    :param option_p: parameter object queried with (key, default, description) tuples
    :param patch_size: patch size in itk (x, y, z) order
    :param overlap_size: overlap size at both ends of each dimension
    :param mode: 'eval' or 'pred'; when None the value is taken from option_p
    :param img_sz: image size hint
    :param flicker_on: enable random patch jitter
    :param flicker_mode: 'rand' or 'ensemble'
    :return: a configured Partition_Multi instance
    """
    pad_mode = option_p[('padding_mode', 'reflect', 'padding_mode')]
    if mode is None:
        mode = option_p[('mode', 'pred', 'eval or pred')]
    jitter_range = option_p[('flicker_range', 0, 'flicker range')]
    return Partition_Multi(
        patch_size,
        overlap_size,
        padding_mode=pad_mode,
        mode=mode,
        img_sz=img_sz,
        flicker_on=flicker_on,
        flicker_range=jitter_range,
        flicker_mode=flicker_mode,
    )
class Partition_Multi(object):
"""partition a 3D volume into small 3D patches using the overlap tiling strategy described in paper:
"U-net: Convolutional networks for biomedical image segmentation." by Ronneberger, Olaf, Philipp Fischer,
and Thomas Brox. In International Conference on Medical Image Computing and Computer-Assisted Intervention,
pp. 234-241. Springer, Cham, 2015.
Note: BE CAREFUL about the order of dimensions for image:
The simpleITK image are in order x, y, z
The numpy array/torch tensor are in order z, y, x
:param tile_size (tuple of 3 or 1x3 np array): size of partitioned patches
:param self.overlap_size (tuple of 3 or 1x3 np array): the size of overlapping region at both end of each dimension
:param padding_mode (tuple of 3 or 1x3 np array): the mode of numpy.pad when padding extra region for image
:param mode: "pred": only image is partitioned; "eval": both image and segmentation are partitioned TODO
"""
def __init__(self, tile_size, overlap_size, padding_mode='reflect', mode="pred", img_sz=None,flicker_on=False,flicker_range=0,flicker_mode='rand'):
self.tile_size = np.flipud(np.asarray(tile_size)) # convert the itk coord to np coord
self.overlap_size = np.flipud(np.asarray(overlap_size)) # convert the itk coord to np coord
self.image_size = img_sz
self.padding_mode = padding_mode
self.mode = mode
self.flicker_on=flicker_on
self.flicker_range = flicker_range
self.flicker_mode= flicker_mode
def __call__(self, sample,disp=0):
"""
:param image: (simpleITK image) 3D Image to be partitioned
:param seg: (simpleITK image) 3D segmentation label mask to be partitioned
:return: N partitioned image and label patches
{'image': Nx1xDxHxW, 'label': Nx1xDxHxW }
"""
# get numpy array from simpleITK images
images_t = sample['image']
#self.image = sample['image']
is_numpy = False
if not isinstance(images_t,list):
# is not list, then it should be itk image
images = [sitk.GetArrayFromImage(images_t)]
else:
if not isinstance(images_t[0], np.ndarray):
images = [ sitk.GetArrayFromImage(image) for image in images_t]
else:
is_numpy = True
images = images_t
if 'label' in sample:
if not is_numpy:
seg_np = sitk.GetArrayFromImage(sample['label'])
else:
seg_np = sample['label']
self.image_size = np.array(images[0].shape) # np coord
self.effective_size = self.tile_size - self.overlap_size * 2 # size effective region of tiles after cropping
self.tiles_grid_size = np.ceil(self.image_size / self.effective_size).astype(int) # size of tiles grid
self.padded_size = self.effective_size * self.tiles_grid_size + self.overlap_size * 2 - self.image_size # size difference of padded image with original image
#print("partitioning, the padded size is {}".format(self.padded_size))
if self.flicker_on:
pp = self.flicker_range
else:
pp=0
if self.mode == 'eval':
seg_padded = np.pad(seg_np,
pad_width=((self.overlap_size[0]+pp, self.padded_size[0] - self.overlap_size[0]+pp),
(self.overlap_size[1]+pp, self.padded_size[1] - self.overlap_size[1]+pp),
(self.overlap_size[2]+pp, self.padded_size[2] - self.overlap_size[2]+pp)),
mode=self.padding_mode)
image_tile_list = []
start_coord_list = []
seg_tile_list = []
image_padded_list = [np.pad(image_np,
pad_width=((self.overlap_size[0] + pp, self.padded_size[0] - self.overlap_size[0] + pp),
(self.overlap_size[1] + pp, self.padded_size[1] - self.overlap_size[1] + pp),
(self.overlap_size[2] + pp, self.padded_size[2] - self.overlap_size[2] + pp)),
mode=self.padding_mode) for image_np in images]
for i in range(self.tiles_grid_size[0]):
for j in range(self.tiles_grid_size[1]):
for k in range(self.tiles_grid_size[2]):
if self.flicker_on:
if self.flicker_mode=='rand':
ri, rj, rk = [np.random.randint(-self.flicker_range,self.flicker_range) for _ in range(3)]
elif self.flicker_mode =='ensemble':
ri,rj,rk = disp
else:
ri,rj,rk= 0, 0, 0
image_tile_temp_list = [image_padded[
i * self.effective_size[0]+ri+pp:i * self.effective_size[0] + self.tile_size[0]+ri+pp,
j * self.effective_size[1]+rj+pp:j * self.effective_size[1] + self.tile_size[1]+rj+pp,
k * self.effective_size[2]+rk+pp:k * self.effective_size[2] + self.tile_size[2]+rk+pp]
for image_padded in image_padded_list]
image_tile_list.append(np.stack(image_tile_temp_list,0))
start_coord_list.append((i * self.effective_size[0]+ri,j * self.effective_size[1]+rj,k * self.effective_size[2]+rk))
if self.mode == 'eval':
seg_tile_temp = seg_padded[
i * self.effective_size[0]+ri+pp:i * self.effective_size[0] + self.tile_size[0]+ri+pp,
j * self.effective_size[1]+rj+pp:j * self.effective_size[1] + self.tile_size[1]+rj+pp,
k * self.effective_size[2]+rk+pp:k * self.effective_size[2] + self.tile_size[2]+rk+pp]
seg_tile_list.append(np.expand_dims(seg_tile_temp, axis=0))
# sample['image'] = np.stack(image_tile_list, 0)
# sample['segmentation'] = np.stack(seg_tile_list, 0)
trans_sample ={}
trans_sample['image'] = np.stack(image_tile_list, 0) # N*C*xyz
if 'label'in sample:
if self.mode == 'pred':
trans_sample['label'] = np.expand_dims(np.expand_dims(seg_np, axis=0), axis=0) #1*XYZ
else:
trans_sample['label'] = np.stack(seg_tile_list, 0) # N*1*xyz
trans_sample['tile_size'] = self.tile_size
trans_sample['overlap_size'] = self.overlap_size
trans_sample['padding_mode'] = self.padding_mode
trans_sample['flicker_on'] = self.flicker_on
trans_sample['disp'] = disp
trans_sample['num_crops_per_img'] = len(image_tile_list)
trans_sample['start_coord_list'] = start_coord_list
return trans_sample
def assemble(self, tiles, image_size=None, is_vote=False):
    """
    Assembles segmentation of small patches into the original size.

    Each tile contributes only its effective center region (the overlap
    margins are cropped away); the pieces are placed back on the tiling grid
    and the padded border is trimmed to the original image size.

    :param tiles: torch tensor of shape N x B x C x h x d x w holding the N
        patches (tiles[ind] keeps two leading non-spatial dims, matching the
        [:, :, ...] slicing below).
    :param image_size: optional original image size (numpy coord); when given,
        the tiling geometry is recomputed from it.
    :param is_vote: accepted for interface compatibility; not used here.
    :return: np array of shape B x C x *image_size with the reassembled segmentation
    """
    if image_size is not None:
        self.image_size = image_size
        self.effective_size = self.tile_size - self.overlap_size * 2  # size effective region of tiles after cropping
        self.tiles_grid_size = np.ceil(self.image_size / self.effective_size).astype(int)  # size of tiles grid
        self.padded_size = self.effective_size * self.tiles_grid_size + self.overlap_size * 2 - self.image_size  # size difference of padded image with original image
    tiles = tiles.numpy()
    # BUGFIX: np.zeros takes a single shape tuple; the old call passed three
    # positional arguments (interpreted as shape, dtype, order) and raised a
    # TypeError.  The two leading dims must match tiles[ind][:, :, ...],
    # i.e. tiles.shape[1] and tiles.shape[2].
    full_size = tuple(self.effective_size * self.tiles_grid_size)
    seg_reassemble = np.zeros((tiles.shape[1], tiles.shape[2]) + full_size, dtype=tiles.dtype)
    for i in range(self.tiles_grid_size[0]):
        for j in range(self.tiles_grid_size[1]):
            for k in range(self.tiles_grid_size[2]):
                ind = i * self.tiles_grid_size[1] * self.tiles_grid_size[2] + j * self.tiles_grid_size[2] + k
                seg_reassemble[:, :, i * self.effective_size[0]:(i + 1) * self.effective_size[0],
                               j * self.effective_size[1]:(j + 1) * self.effective_size[1],
                               k * self.effective_size[2]:(k + 1) * self.effective_size[2]] = \
                    tiles[ind][:, :, self.overlap_size[0]:self.tile_size[0] - self.overlap_size[0],
                               self.overlap_size[1]:self.tile_size[1] - self.overlap_size[1],
                               self.overlap_size[2]:self.tile_size[2] - self.overlap_size[2]]
    # BUGFIX: trim back to the original image size; the first spatial axis was
    # previously indexed with image_size[0] (missing ':'), dropping a dim.
    seg_reassemble = seg_reassemble[:, :, :self.image_size[0], :self.image_size[1], :self.image_size[2]]
    # seg_image = sitk.GetImageFromArray(seg_reassemble)
    # seg_image.CopyInformation(self.image)
    return seg_reassemble
| 9,615 | 53.636364 | 187 | py |
easyreg | easyreg-master/data_pre/transform_pool.py | """ classes of transformations for 3d simpleITK image
"""
import threading
import SimpleITK as sitk
import numpy as np
import torch
import math
import random
import time
from math import floor
class Resample(object):
    """Resample the volumes in a sample to a target voxel spacing.

    Args:
        voxel_size (float or tuple): desired output spacing; a float yields an
            isotropic volume, a 3-tuple is used as the per-axis spacing.
    Currently only linear interpolation is supported.
    """

    def __init__(self, voxel_size):
        assert isinstance(voxel_size, (float, tuple))
        if isinstance(voxel_size, float):
            self.voxel_size = (voxel_size,) * 3
        else:
            assert len(voxel_size) == 3
            self.voxel_size = voxel_size

    def __call__(self, sample):
        img, seg = sample['image'], sample['label']
        src_spacing = img.GetSpacing()
        src_size = img.GetSize()
        dst_spacing = self.voxel_size
        # per-axis voxel count that preserves the physical extent
        dst_size = tuple(
            int(math.ceil(src_spacing[d] * src_size[d] / dst_spacing[d]))
            for d in range(3))
        resampler = sitk.ResampleImageFilter()
        resampler.SetInterpolator(1)  # 1 == linear
        resampler.SetOutputSpacing(dst_spacing)
        resampler.SetSize(dst_size)
        # resample the image on its own grid
        resampler.SetOutputOrigin(img.GetOrigin())
        resampler.SetOutputDirection(img.GetDirection())
        print("Resampling image...")
        sample['image'] = resampler.Execute(img)
        # resample the segmentation on its own grid
        resampler.SetOutputOrigin(seg.GetOrigin())
        resampler.SetOutputDirection(seg.GetDirection())
        print("Resampling segmentation...")
        sample['label'] = resampler.Execute(seg)
        return sample
class Normalization(object):
    """Normalize an image by setting its mean to zero and variance to one."""

    def __call__(self, sample):
        # a fresh filter is stored on the instance each call, as before
        self.normalizeFilter = sitk.NormalizeImageFilter()
        print("Normalizing image...")
        image, _ = sample['image'], sample['label']
        sample['image'] = self.normalizeFilter.Execute(image)
        return sample
class SitkToTensor(object):
    """Convert sitk image to 4D Tensors with shape(1, D, H, W)"""

    def __call__(self, sample):
        image_arr = sitk.GetArrayFromImage(sample['image'])
        seg_arr = sitk.GetArrayFromImage(sample['label'])
        # clamp image intensities into [0, 1]
        image_arr = np.clip(image_arr, 0.0, 1.0)
        image_arr = np.float32(image_arr)
        seg_arr = np.uint8(seg_arr)
        # prepend the channel dimension expected downstream
        image_arr = np.expand_dims(image_arr, axis=0)
        sample['image'] = torch.from_numpy(image_arr)
        sample['label'] = torch.from_numpy(seg_arr)
        return sample
class RandomBSplineTransform(object):
    """
    Apply random BSpline Transformation to a 3D image
    check https://itk.org/Doxygen/html/classitk_1_1BSplineTransform.html for details of BSpline Transform

    With probability ``ratio`` the sample's image and label are warped by a
    BSpline deformation whose control-point displacements are drawn at random.
    """

    def __init__(self, mesh_size=(3,3,3), bspline_order=2, deform_scale=1.0, ratio=0.5, interpolator=sitk.sitkLinear,
                 random_mode = 'Normal'):
        # mesh_size: number of BSpline control-point cells per axis
        # bspline_order: spline order passed to the initializer
        # deform_scale: scale of the random control-point displacements
        # interpolator: used for the image; the label always uses nearest-neighbor
        # random_mode: 'Normal' or 'Uniform' displacement distribution
        self.mesh_size = mesh_size
        self.bspline_order = bspline_order
        self.deform_scale = deform_scale
        self.ratio = ratio # control the probability of conduct transform
        self.interpolator = interpolator
        self.random_mode = random_mode

    def __call__(self, sample):
        # seeded from the wall clock on every call
        random_state = np.random.RandomState(int(time.time()))
        # NOTE(review): the probability gate draws from the *global* np.random,
        # not from random_state -- TODO confirm intended
        if np.random.rand(1)[0] < self.ratio:
            img, seg = sample['image'], sample['label']
            # initialize a bspline transform
            bspline = sitk.BSplineTransformInitializer(img, self.mesh_size, self.bspline_order)
            # generate random displacement for control points, the deformation is scaled by deform_scale
            if self.random_mode == 'Normal':
                control_point_displacements = random_state.normal(0, self.deform_scale/2, len(bspline.GetParameters()))
            elif self.random_mode == 'Uniform':
                control_point_displacements = random_state.random(len(bspline.GetParameters())) * self.deform_scale
            # NOTE(review): any other random_mode leaves the displacements
            # undefined and raises NameError on the next line
            control_point_displacements[0:int(len(control_point_displacements) / 3)] = 0 # remove z displacement
            bspline.SetParameters(control_point_displacements)
            # deform and resample image; the label uses nearest-neighbor so it stays integral
            img_trans = resample(img, bspline, interpolator=self.interpolator, default_value=0.1)
            seg_trans = resample(seg, bspline, interpolator=sitk.sitkNearestNeighbor, default_value=0)
            sample['image'] = img_trans
            sample['label'] = seg_trans
        return sample
class RandomRigidTransform(object):
    """
    Apply random similarity Transformation to a 3D image

    With probability ``ratio`` a rigid (Euler) transform with random rotation
    angles and translations is applied to the image and/or the label,
    depending on ``mode`` ('both' | 'image' | 'label').
    """

    def __init__(self, ratio=1.0, rotation_center=None, rotation_angles=(0.0, 0.0, 0.0), translation=(0.0, 0.0, 0.0),
                 interpolator=sitk.sitkLinear, mode='both'):
        # rotation_center: center of rotation; defaults to the image center
        #   (assumes an index-space point -- TODO confirm against callers)
        # rotation_angles: per-axis spread (degrees) of the random rotation
        # translation: per-axis spread (voxels) of the random translation
        self.rotation_center = rotation_center
        self.rotation_angles = rotation_angles
        self.translation = translation
        self.interpolator = interpolator
        self.ratio = ratio
        self.mode = mode

    def __call__(self, sample):
        # seeded from the wall clock on every call
        random_state = np.random.RandomState(int(time.time()))
        if random_state.rand(1)[0] < self.ratio:
            img, seg = sample['image'], sample['label']
            image_size = img.GetSize()
            image_spacing = img.GetSpacing()
            if self.rotation_center:
                rotation_center = self.rotation_center
            else:
                rotation_center = (np.array(image_size) // 2).tolist()
            # convert the index-space center into a physical point
            rotation_center = img.TransformIndexToPhysicalPoint(rotation_center)
            # angles drawn in degrees (std = angle/2) and converted to radians
            rotation_radians_x = random_state.normal(0, self.rotation_angles[0]/2) * np.pi/180
            rotation_radians_y = random_state.normal(0, self.rotation_angles[1]/2) * np.pi/180
            rotation_radians_z = random_state.normal(0, self.rotation_angles[2]/2) * np.pi/180
            # translations drawn in voxels and scaled into physical units
            random_trans_x = random_state.normal(0, self.translation[0] / 2) * image_spacing[0]
            random_trans_y = random_state.normal(0, self.translation[1] / 2) * image_spacing[1]
            random_trans_z = random_state.normal(0, self.translation[2] / 2) * image_spacing[2]
            # initialize a rigid (Euler) transform
            rigid_transform = sitk.Euler3DTransform(rotation_center, rotation_radians_x, rotation_radians_y, rotation_radians_z,
                                                    (random_trans_x, random_trans_y, random_trans_z))
            # deform and resample image
            if self.mode == 'both':
                img_trans = resample(img, rigid_transform, interpolator=self.interpolator, default_value=0.1)
                seg_trans = resample(seg, rigid_transform, interpolator=sitk.sitkNearestNeighbor, default_value=0)
            elif self.mode == 'image':
                img_trans = resample(img, rigid_transform, interpolator=self.interpolator, default_value=0.1)
                seg_trans = seg
            elif self.mode == 'label':
                img_trans = img
                seg_trans = resample(seg, rigid_transform, interpolator=sitk.sitkNearestNeighbor, default_value=0)
            else:
                raise ValueError('Wrong rigid transformation mode :{}!'.format(self.mode))
            sample['image'] = img_trans
            sample['label'] = seg_trans
        return sample
class IdentityTransform(object):
    """No-op transform: hands the sample back unchanged."""

    def __call__(self, sample):
        # nothing to do
        return sample
def resample(image, transform, interpolator=sitk.sitkBSpline, default_value=0.0):
    """Resample *image* through *transform*, using the image itself as the
    reference grid."""
    return sitk.Resample(image, image, transform, interpolator, default_value)
class GaussianBlur(object):
    """With probability ``ratio``, smooth the image with a discrete Gaussian."""

    def __init__(self, variance=0.5, maximumKernelWidth=1, maximumError=0.9, ratio=1.0):
        self.ratio = ratio  # probability of applying the blur
        self.variance = variance
        self.maximumKernelWidth = maximumKernelWidth
        self.maximumError = maximumError

    def __call__(self, sample):
        rng = np.random.RandomState(int(time.time()))
        if rng.rand() < self.ratio:
            image, _ = sample['image'], sample['label']
            sample['image'] = sitk.DiscreteGaussian(
                image,
                variance=self.variance,
                maximumKernelWidth=self.maximumKernelWidth,
                maximumError=self.maximumError,
                useImageSpacing=False)
        return sample
class BilateralFilter(object):
    """With probability ``ratio``, apply an edge-preserving bilateral filter."""

    def __init__(self, domainSigma=0.5, rangeSigma=0.06, numberOfRangeGaussianSamples=50, ratio=1.0):
        self.domainSigma = domainSigma
        self.rangeSigma = rangeSigma
        self.numberOfRangeGaussianSamples = numberOfRangeGaussianSamples
        self.ratio = ratio  # probability of applying the filter

    def __call__(self, sample):
        rng = np.random.RandomState(int(time.time()))
        if rng.rand(1)[0] < self.ratio:
            image, _ = sample['image'], sample['label']
            sample['image'] = sitk.Bilateral(
                image,
                domainSigma=self.domainSigma,
                rangeSigma=self.rangeSigma,
                numberOfRangeGaussianSamples=self.numberOfRangeGaussianSamples)
        return sample
class RandomCrop(object):
    """Crop randomly the image in a sample. This is usually used for data augmentation

    The crop location is re-drawn until the mean value of the cropped label
    patch exceeds ``threshold``.

    Args:
        output_size (tuple or int): Desired output size (itk coord). If int, cubic crop
            is made.
        threshold (float): minimum mean label value of an acceptable crop.
        random_state: NOTE(review): effectively unused -- __call__ re-seeds
            from the wall clock on every invocation.
    """

    def __init__(self, output_size, threshold=-0, random_state=None):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size, output_size)
        else:
            assert len(output_size) == 3
            self.output_size = output_size
        self.threshold = threshold
        if random_state:
            self.random_state = random_state
        else:
            self.random_state = np.random.RandomState()

    def __call__(self, sample):
        """
        the input patch sz and output_size is defined in itk coord
        :param sample: dict with sitk 'image' and 'label'
        :return: the same dict with 'image'/'label' replaced by the crops
        """
        img, seg = sample['image'], sample['label']
        size_old = img.GetSize()
        size_new = self.output_size
        # re-seeded each call; workers started within the same second will
        # draw identical crops -- TODO confirm intended
        self.random_state = np.random.RandomState(int(time.time()))
        contain_label = False
        roiFilter = sitk.RegionOfInterestImageFilter()
        roiFilter.SetSize([size_new[0], size_new[1], size_new[2]])
        # print(sample['name'])
        while not contain_label:
            # get the start crop coordinate in ijk
            # NOTE(review): randint(0, n) raises ValueError when n == 0, i.e.
            # when the image size equals the patch size along an axis
            start_i = self.random_state.randint(0, size_old[0] - size_new[0])
            start_j = self.random_state.randint(0, size_old[1] - size_new[1])
            start_k = self.random_state.randint(0, size_old[2] - size_new[2])
            # start_i = torch.IntTensor(1).random_(0, size_old[0] - size_new[0])[0]
            # start_j = torch.IntTensor(1).random_(0, size_old[1] - size_new[1])[0]
            # start_k = torch.IntTensor(1).random_(0, size_old[2] - size_new[2])[0]
            # print(sample['name'], start_i, start_j, start_k)
            roiFilter.SetIndex([start_i, start_j, start_k])
            seg_crop = roiFilter.Execute(seg)
            # statFilter = sitk.StatisticsImageFilter()
            # statFilter.Execute(seg_crop)
            #
            # # will iterate until a sub volume containing label is extracted
            # if statFilter.GetSum() >= 1:
            #     contain_label = True
            seg_crop_np = sitk.GetArrayViewFromImage(seg_crop)
            # center_ind = np.array(seg_crop_np.shape)//2-1
            # if seg_crop_np[center_ind[0], center_ind[1], center_ind[2]] > 0:
            #     contain_label = True
            # accept once the mean label value clears the threshold
            if np.sum(seg_crop_np)/seg_crop_np.size > self.threshold:
                contain_label = True
        img_crop = roiFilter.Execute(img)
        sample['image'] = img_crop
        sample['label'] = seg_crop
        return sample
class BalancedRandomCrop(object):
    """Crop randomly the image in a sample. This is usually used for data augmentation

    Cycles an internal class tag across successive calls so that crops
    alternate between unconstrained patches (class 0) and patches with enough
    voxels of label 1 or label 2.

    Args:
        output_size (tuple or int): Desired output size. If int, cubic crop
            is made.
        threshold (float or tuple): minimum fraction of the focused label
            required in an accepted crop.
    """

    def __init__(self, output_size, threshold=0.01, random_state=None):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size, output_size)
        else:
            assert len(output_size) == 3
            self.output_size = output_size
        assert isinstance(threshold, (float, tuple))
        if isinstance(threshold, float):
            # NOTE(review): a float expands to a 3-tuple here, yet a tuple
            # input is asserted to have length 2 and only indices 0/1 are read
            self.threshold = (threshold, threshold, threshold)
        else:
            assert len(threshold) == 2
            self.threshold = threshold
        if random_state:
            self.random_state = random_state
        else:
            self.random_state = np.random.RandomState()
        self.current_class = 1 # tag that which class should be focused on currently

    def __call__(self, sample):
        """
        the input patch sz and output_size is defined in itk coord
        :param sample: dict with sitk 'image' and 'label'
        :return: the dict with cropped 'image'/'label' plus the focused 'class'
        """
        img, seg = sample['image'], sample['label']
        size_old = img.GetSize()
        size_new = self.output_size
        # re-seeded from the wall clock on every call
        self.random_state = np.random.RandomState(int(time.time()))
        contain_label = False
        roiFilter = sitk.RegionOfInterestImageFilter()
        roiFilter.SetSize([size_new[0], size_new[1], size_new[2]])
        contain_label = False
        if self.current_class == 0: # random crop a patch
            start_i, start_j, start_k = random_3d_coordinates(np.array(size_old) - np.array(size_new), self.random_state)
            roiFilter.SetIndex([start_i, start_j, start_k])
            seg_crop = roiFilter.Execute(seg)
        elif self.current_class == 1: # crop a patch where class 1 in main
            i = 0
            # print(sample['name'])
            while not contain_label:
                # get the start crop coordinate in ijk
                start_i, start_j, start_k = random_3d_coordinates(np.array(size_old) - np.array(size_new),
                                                                  self.random_state)
                roiFilter.SetIndex([start_i, start_j, start_k])
                seg_crop = roiFilter.Execute(seg)
                seg_crop_np = sitk.GetArrayViewFromImage(seg_crop)
                if np.sum(seg_crop_np==1) / seg_crop_np.size > self.threshold[0]: # judge if the patch satisfy condition
                    contain_label = True
                i = i + 1
        else: # crop a patch where class 2 in main
            # print(sample['name'])
            i = 0
            while not contain_label:
                # get the start crop coordinate in ijk
                start_i, start_j, start_k = random_3d_coordinates(np.array(size_old) - np.array(size_new),
                                                                  self.random_state)
                roiFilter.SetIndex([start_i, start_j, start_k])
                seg_crop = roiFilter.Execute(seg)
                seg_crop_np = sitk.GetArrayViewFromImage(seg_crop)
                if np.sum(seg_crop_np == 2) / seg_crop_np.size > self.threshold[1]: # judge if the patch satisfy condition
                    contain_label = True
                i = i + 1
        # print(sample['name'], 'case: ', rand_ind, 'trying: ', i)
        # print([start_i, start_j, start_k])
        # re-apply the accepted start index and crop both volumes
        roiFilter.SetIndex([start_i, start_j, start_k])
        seg_crop = roiFilter.Execute(seg)
        img_crop = roiFilter.Execute(img)
        sample['image'] = img_crop
        sample['label'] = seg_crop
        sample['class'] = self.current_class
        # reset class tag
        # NOTE(review): the tag cycles 0..3, but values 2 and 3 both fall into
        # the class-2 branch above -- TODO confirm intended
        self.current_class = self.current_class+1
        if self.current_class>3:
            self.current_class=0
        return sample
class MyRandomCrop(object):
    """Crop randomly the image in a sample. This is usually used for data augmentation

    Re-draws the crop location until the patch's non-background fraction
    exceeds ``nbg_threshold``; with probability ``crop_bg_ratio`` the first
    draw is accepted regardless, so pure-background patches are also sampled.

    Args:
        output_size (tuple or int): Desired output size (itk coord). If int, cubic crop
            is made.
        nbg_threshold (float): minimum non-background voxel fraction.
        crop_bg_ratio (float): probability of accepting the first draw unchecked.
        bg_label (int): label value regarded as background.
    """

    def __init__(self, output_size, nbg_threshold, crop_bg_ratio=0.1, bg_label=0,random_state=None):
        self.bg_label= bg_label
        self.crop_bg_ratio = crop_bg_ratio
        """ expected ratio of background crops; assumes the background label dominates the others"""
        self.nbg_threshold = nbg_threshold
        assert isinstance(output_size, (int, tuple,list))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size, output_size)
        else:
            assert len(output_size) >1
            self.output_size = output_size
        if random_state:
            self.random_state = random_state
        else:
            self.random_state = np.random.RandomState()

    def __call__(self, sample):
        """
        the input patch sz and output_size is defined in itk coord
        :param sample: dict with 'image' (sitk image or list of them) and 'label'
        :return: a new dict with the cropped patch and bookkeeping fields
        """
        img, seg = sample['image'], sample['label']
        # NOTE(review): GetSize() is called on img directly, so a list input
        # would fail here even though the crop below supports lists
        size_old = img.GetSize()
        size_new = self.output_size
        roiFilter = sitk.RegionOfInterestImageFilter()
        roiFilter.SetSize(size_new)
        # switch from itk (x, y, z) to numpy (z, y, x) ordering for the search
        size_new = np.flipud(size_new)
        size_old = np.flipud(size_old)
        self.random_state = np.random.RandomState(int(time.time()))
        # with probability crop_bg_ratio accept the very first draw unchecked
        crop_once = self.random_state.rand()< self.crop_bg_ratio
        seg_np = sitk.GetArrayViewFromImage(seg)
        contain_label = False
        start_coord = None
        nbg_ratio = 0 # ratio of non-bg label
        # print(sample['name'])
        while not contain_label :
            # get the start crop coordinate in ijk
            start_coord = random_nd_coordinates(np.array(size_old) - np.array(size_new),
                                                self.random_state)
            seg_crop_np = cropping(seg_np,start_coord,size_new)
            bg_ratio = np.sum(seg_crop_np==self.bg_label) / seg_crop_np.size
            nbg_ratio =1.0- bg_ratio
            if nbg_ratio > self.nbg_threshold: # judge if the patch satisfy condition
                contain_label = True
            elif crop_once:
                break
        # back to itk ordering for the ROI filter
        start_coord = np.flipud(start_coord).tolist()
        roiFilter.SetIndex(start_coord)
        seg_crop = roiFilter.Execute(seg)
        if not isinstance(img,list):
            img_crop = roiFilter.Execute(img)
        else:
            img_crop = [roiFilter.Execute(im) for im in img]
        trans_sample={}
        trans_sample['image'] = img_crop
        trans_sample['label'] = seg_crop
        trans_sample['label_selected'] = -1
        trans_sample['start_coord']= tuple(start_coord)
        trans_sample['threshold'] = nbg_ratio
        return trans_sample
class FlickerCrop(object):
    """Randomly crop a patch of ``output_size`` from the image in a sample.

    Re-draws the crop location until the patch contains more than
    ``nbg_threshold`` non-background voxels; with probability
    ``adopt_bg_ratio`` the very first draw is accepted regardless.

    Args:
        output_size (tuple or int): desired patch size (itk coord); an int
            produces a cubic crop.
        adopt_bg_ratio (float): probability of accepting the first crop
            without checking its foreground content.
        bg_label (int): label value regarded as background.
        random_state: optional np.random.RandomState (re-seeded per call).
        nbg_threshold (float): minimum non-background voxel fraction for a
            crop to be accepted.  BUGFIX: the original read
            ``self.nbg_threshold`` without ever assigning it, so every
            threshold check raised AttributeError; the attribute is now set
            from this backward-compatible keyword parameter.
    """

    def __init__(self, output_size, adopt_bg_ratio, bg_label=0, random_state=None, nbg_threshold=0.0):
        self.bg_label = bg_label
        self.adopt_bg_ratio = adopt_bg_ratio
        # minimum fraction of non-background voxels required in a crop;
        # previously never set, causing AttributeError in __call__
        self.nbg_threshold = nbg_threshold
        assert isinstance(output_size, (int, tuple, list))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size, output_size)
        else:
            assert len(output_size) > 1
            self.output_size = output_size
        if random_state:
            self.random_state = random_state
        else:
            self.random_state = np.random.RandomState()

    def __call__(self, sample):
        """
        the input patch sz and output_size is defined in itk coord
        :param sample: dict with 'image' (sitk image or list of them) and 'label'
        :return: a new dict with the cropped patch and bookkeeping fields
        """
        img, seg = sample['image'], sample['label']
        if not isinstance(img, list):
            size_old = img.GetSize()
        else:
            size_old = img[0].GetSize()
        self.random_state = np.random.RandomState(int(time.time()))
        size_new = self.output_size
        roiFilter = sitk.RegionOfInterestImageFilter()
        roiFilter.SetSize(size_new)
        # switch from itk (x, y, z) to numpy (z, y, x) ordering for the search
        size_new = np.flipud(size_new)
        size_old = np.flipud(size_old)
        crop_once = self.random_state.rand() < self.adopt_bg_ratio
        seg_np = sitk.GetArrayViewFromImage(seg)
        contain_label = False
        start_coord = None
        nbg_ratio = 0  # ratio of non-bg label in the accepted crop
        while not contain_label:
            # draw a random start coordinate (numpy coord) for the crop
            start_coord = random_nd_coordinates(np.array(size_old) - np.array(size_new),
                                                self.random_state)
            seg_crop_np = cropping(seg_np, start_coord, size_new)
            bg_ratio = np.sum(seg_crop_np == self.bg_label) / seg_crop_np.size
            nbg_ratio = 1.0 - bg_ratio
            if nbg_ratio > self.nbg_threshold:  # crop has enough foreground
                contain_label = True
            elif crop_once:
                break
        # back to itk ordering for the ROI filter
        start_coord = np.flipud(start_coord).tolist()
        roiFilter.SetIndex(start_coord)
        seg_crop = roiFilter.Execute(seg)
        if not isinstance(img, list):
            img_crop = roiFilter.Execute(img)
        else:
            # mirror MyRandomCrop: crop every modality when a list is given
            # (the original unconditionally called Execute(img) and crashed)
            img_crop = [roiFilter.Execute(im) for im in img]
        trans_sample = {}
        trans_sample['image'] = img_crop
        trans_sample['label'] = seg_crop
        trans_sample['label_selected'] = -1
        trans_sample['start_coord'] = tuple(start_coord)
        trans_sample['threshold'] = nbg_ratio
        return trans_sample
class MyBalancedRandomCrop(object):
    """Crop randomly the image in a sample. This is usually used for data augmentation

    Each call focuses on one label from ``label_list`` (derived from the
    caller-provided ``rand_id``) and re-draws crop locations until the
    focused label occupies at least the corresponding ``threshold`` fraction
    of the patch.  When ``max_crop_num`` > 0, accepted start coordinates are
    cached per label and replayed once that label's cache is full.

    Args:
        output_size (tuple or int): Desired output size. If int, cubic crop
            is made.
        threshold (float or tuple/list): per-label minimum occupancy fraction.
        label_list (tuple): label values to balance over.
        max_crop_num (int): size of the per-label coordinate cache; <= 0 disables it.
    """

    def __init__(self, output_size, threshold, random_state=None, label_list=(),max_crop_num = -1):
        #assert max_crop_num==-1, "dataloader bugs, not fixed now"
        self.num_label= len(label_list)
        self.label_list = label_list
        self.max_crop_on = max_crop_num>0
        self.max_crop_num = max_crop_num
        assert isinstance(output_size, (int, tuple,list))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size, output_size)
        else:
            assert len(output_size) >1
            self.output_size = output_size # the given outputsize is in numpy cord
        assert isinstance(threshold, (float, tuple,list))
        if isinstance(threshold, float):
            self.threshold = tuple([threshold]*self.num_label)
        else:
            #assert sum(np.array(threshold)!=0) == self.num_label
            self.threshold = threshold
        if random_state:
            self.random_state = random_state
        else:
            self.random_state = np.random.RandomState()
        self.cur_label_id = random.randint(0,self.num_label-1)
        ### if the max_crop_num>0, then a numpy of shape [], the coodinate is in numpy style
        print("Init my current transfrom: {}".format(threading.current_thread()))
        if self.max_crop_on:
            # per-label cache of accepted start coordinates (numpy coord) and
            # the count of crops accepted for each label so far
            self.np_coord_buffer = np.zeros((self.num_label, self.max_crop_num, 3)).astype(np.int32)
            self.np_coord_count = np.zeros(self.num_label).astype(np.int32)

    def __call__(self, sample, rand_id=-1):
        """
        the input patch sz and output_size is defined in itk coord
        :param sample:if the img in sample is a list, then return a list of img list otherwise return single image
        :param rand_id: caller-supplied counter; the focused label is rand_id % num_label
        :return: dict with the cropped image/label plus bookkeeping fields
        """
        if rand_id >=0:
            cur_label_id = rand_id%self.num_label
        else:
            raise ValueError("should not happen in this case")
        #print("id(self): {} , cur_label_id:{}".format(id(self),cur_label_id))
        is_numpy = False
        #self.random_state =np.random.RandomState(rand_id)
        self.random_state = np.random.RandomState(int(time.time()))
        # the size coordinate system here is according to the itk coordinate
        img, seg = sample['image'], sample['label']
        cur_label = int(self.label_list[cur_label_id])
        if isinstance(img,list):
            if not isinstance(img[0],np.ndarray):
                size_old = img[0].GetSize()
            else:
                is_numpy = True
                #cur_label = cur_label_id
                size_old = np.flipud(list(img[0].shape)) #itkcoord
        else:
            if not isinstance(img,np.ndarray):
                size_old = img.GetSize() # itkcoord
            else:
                is_numpy = True
        size_new = self.output_size #itk coord
        #cur_label_id = self.random_state.randint(self.num_label)
        if not is_numpy:
            roiFilter = sitk.RegionOfInterestImageFilter()
            roiFilter.SetSize(size_new)
            seg_np = sitk.GetArrayViewFromImage(seg)
        else:
            seg_np = seg.copy()
        label_ratio =0
        # replay a cached coordinate once this label's cache is full
        if self.max_crop_on and self.np_coord_count[cur_label_id] >= self.max_crop_num:
            ins_id = self.np_coord_count[cur_label_id] % self.max_crop_num
            start_coord = self.np_coord_buffer[cur_label_id, ins_id, :]
            size_new = np.flipud(size_new)
            #print("this is signal")
        else:
            # here the coordinate system transfer from the sitk to numpy
            size_new = np.flipud(size_new)
            size_old = np.flipud(size_old)
            # rand_ind = self.random_state.randint(3) # random choose to focus on one class
            contain_label = False
            start_coord = None
            count = 0
            # print(sample['name'])
            while not contain_label:
                # get the start crop coordinate in ijk, the operation is did in the numpy coordinate
                start_coord = random_nd_coordinates(np.array(size_old) - np.array(size_new),
                                                    self.random_state)
                seg_crop_np = cropping(seg_np,start_coord,size_new)
                label_ratio = np.sum(seg_crop_np==cur_label) / float(seg_crop_np.size)
                count += 1
                if count>10000:
                    print("Warning!!!!!, no crop")
                    print(cur_label_id)
                    print(self.label_list)
                # NOTE(review): the threshold is indexed by the label *value*,
                # not by cur_label_id -- only safe when label_list is 0..n-1
                if label_ratio >= self.threshold[cur_label]: # judge if the patch satisfy condition
                    contain_label = True
                    if self.max_crop_on:
                        self.np_coord_buffer[cur_label_id, self.np_coord_count[cur_label_id], :] = start_coord
        if self.max_crop_on:
            #print("coord count: {}--{}--{}--{}".format(self.np_coord_count, threading.current_thread(), id(self.np_coord_count), id(self)))
            self.np_coord_count[cur_label_id] += 1
        if is_numpy:
            # numpy path: slice the arrays directly instead of using sitk ROI
            after_coord = [start_coord[i] + size_new[i] for i in range(len(size_new))]
            img_crop =[]
            if isinstance(img, list):
                for im in img:
                    img_crop += [im[start_coord[0]:after_coord[0], start_coord[1]:after_coord[1], start_coord[2]:after_coord[2]].copy()]
            seg_crop =seg_np[start_coord[0]:after_coord[0], start_coord[1]:after_coord[1],
                      start_coord[2]:after_coord[2]].copy()
            img_crop = np.stack(img_crop,0)
            seg_crop =np.expand_dims(seg_crop,0)
            # transfer the numpy coordinate into itk coordinate
            start_coord = np.flipud(start_coord).tolist()
            # now transfer the system into the sitk system
        else:
            start_coord = np.flipud(start_coord).tolist()
            roiFilter.SetIndex(start_coord)
            seg_crop = roiFilter.Execute(seg)
            if not isinstance(img,list):
                img_crop = roiFilter.Execute(img)
            else:
                img_crop = [roiFilter.Execute(im) for im in img]
        trans_sample={}
        trans_sample['image'] = img_crop
        trans_sample['label'] = seg_crop
        trans_sample['label_selected'] = cur_label
        trans_sample['start_coord']= tuple(start_coord) # is always given in itk coord
        trans_sample['threshold'] = label_ratio
        if rand_id<0:
            self.cur_label_id = cur_label_id + 1
            self.cur_label_id = self.cur_label_id if self.cur_label_id < self.num_label else 0
        return trans_sample
def cropping(img, start_coord, size_new):
    """Crop a sub-array out of *img*.

    :param img: np array
    :param start_coord: per-dimension start indices of the crop
    :param size_new: per-dimension crop extents
    :return: the view img[start:start+size, ...] over the leading
        len(start_coord) dimensions

    Generalized from the original, which only handled 2-D and 3-D coordinate
    lists and silently returned None for any other length.
    """
    crop_slices = tuple(slice(start, start + extent)
                        for start, extent in zip(start_coord, size_new))
    return img[crop_slices]
def random_nd_coordinates(range_nd, random_state=None):
    """Draw one random integer coordinate in [0, range_nd[i]) per dimension.

    :param range_nd: per-dimension exclusive upper bounds
    :param random_state: optional np.random.RandomState; a fresh one is
        created when omitted (the original crashed on None because the guard
        was commented out)
    :return: list of ints, one per dimension; 0 where the range is empty
    """
    if random_state is None:
        random_state = np.random.RandomState()
    # randint raises ValueError when low >= high, which happens whenever the
    # patch size equals the image size along an axis -- return 0 there instead
    return [random_state.randint(0, r) if r > 0 else 0 for r in range_nd]
def random_3d_coordinates(range_3d, random_state=None):
    """Draw a random 3-D integer coordinate with each component in
    [0, range_3d[i]).

    :param range_3d: three exclusive upper bounds
    :param random_state: optional np.random.RandomState; created when omitted
    :return: list of three ints; 0 where the range is empty (randint would
        otherwise raise ValueError when the bound is 0, e.g. when the patch
        size equals the image size along an axis)
    """
    assert len(range_3d) == 3
    if not random_state:
        random_state = np.random.RandomState()
    return [random_state.randint(0, r) if r > 0 else 0 for r in range_3d]
| 29,747 | 36.513241 | 140 | py |
easyreg | easyreg-master/data_pre/partition.py | import numpy as np
import SimpleITK as sitk
def partition(option_p, patch_size, overlap_size, mode=None, img_sz=(-1,-1,-1), flicker_on=False, flicker_mode='rand'):
    """Build a Partition object from an option dict.

    ``option_p`` is a project parameter dict indexed with
    (key, default, description) tuples; defaults are used for missing keys.
    When ``mode`` is None it is read from the options as well.
    """
    pad_mode = option_p[('padding_mode', 'reflect', 'padding_mode')]
    if mode is None:
        mode = option_p[('mode', 'pred', 'eval or pred')]
    flk_range = option_p[('flicker_range', 0, 'flicker range')]
    return Partition(patch_size, overlap_size, padding_mode=pad_mode, mode=mode,
                     img_sz=img_sz, flicker_on=flicker_on,
                     flicker_range=flk_range, flicker_mode=flicker_mode)
class Partition(object):
"""partition a 3D volume into small 3D patches using the overlap tiling strategy described in paper:
"U-net: Convolutional networks for biomedical image segmentation." by Ronneberger, Olaf, Philipp Fischer,
and Thomas Brox. In International Conference on Medical Image Computing and Computer-Assisted Intervention,
pp. 234-241. Springer, Cham, 2015.
Note: BE CAREFUL about the order of dimensions for image:
The simpleITK image are in order x, y, z
The numpy array/torch tensor are in order z, y, x
:param tile_size (tuple of 3 or 1x3 np array): size of partitioned patches
:param self.overlap_size (tuple of 3 or 1x3 np array): the size of overlapping region at both end of each dimension
:param padding_mode (tuple of 3 or 1x3 np array): the mode of numpy.pad when padding extra region for image
:param mode: "pred": only image is partitioned; "eval": both image and segmentation are partitioned TODO
"""
def __init__(self, tile_size, overlap_size, padding_mode='reflect', mode="pred", img_sz=None,flicker_on=False,flicker_range=0,flicker_mode='rand'):
self.tile_size = np.flipud(np.asarray(tile_size)) # convert the itk coord to np coord
self.overlap_size = np.flipud(np.asarray(overlap_size)) # convert the itk coord to np coord
self.image_size = img_sz
self.padding_mode = padding_mode
self.mode = mode
self.flicker_on=flicker_on
self.flicker_range = flicker_range
self.flicker_mode= flicker_mode
def __call__(self, sample,disp=0):
    """
    Partition the sample into overlapping tiles.

    :param sample: dict with 'image' (sitk image, list of sitk images, or
        list of np arrays) and optionally 'label'
    :param disp: (i, j, k) flicker displacement used when flicker_mode=='ensemble'
    :return: dict with tiled 'image' (N*C*xyz), optional 'label', and the
        tiling metadata needed by assemble()
    """
    # get numpy array from simpleITK images
    images_t = sample['image']
    #self.image = sample['image']
    is_numpy = False
    if not isinstance(images_t,list):
        # is not list, then it should be itk image
        images = [sitk.GetArrayFromImage(images_t)]
    else:
        if not isinstance(images_t[0], np.ndarray):
            images = [ sitk.GetArrayFromImage(image) for image in images_t]
        else:
            is_numpy = True
            images = images_t
    if 'label' in sample:
        if not is_numpy:
            seg_np = sitk.GetArrayFromImage(sample['label'])
        else:
            seg_np = sample['label']
    self.image_size = np.array(images[0].shape) # np coord
    self.effective_size = self.tile_size - self.overlap_size * 2 # size effective region of tiles after cropping
    self.tiles_grid_size = np.ceil(self.image_size / self.effective_size).astype(int) # size of tiles grid
    self.padded_size = self.effective_size * self.tiles_grid_size + self.overlap_size * 2 - self.image_size # size difference of padded image with original image
    #print("partitioning, the padded size is {}".format(self.padded_size))
    # pp: extra symmetric padding so flicker offsets cannot index out of bounds
    if self.flicker_on:
        pp = self.flicker_range
    else:
        pp=0
    if self.mode == 'eval':
        seg_padded = np.pad(seg_np,
                            pad_width=((self.overlap_size[0]+pp, self.padded_size[0] - self.overlap_size[0]+pp),
                                       (self.overlap_size[1]+pp, self.padded_size[1] - self.overlap_size[1]+pp),
                                       (self.overlap_size[2]+pp, self.padded_size[2] - self.overlap_size[2]+pp)),
                            mode=self.padding_mode)
    image_tile_list = []
    start_coord_list = []
    seg_tile_list = []
    image_padded_list = [np.pad(image_np,
                                pad_width=((self.overlap_size[0] + pp, self.padded_size[0] - self.overlap_size[0] + pp),
                                           (self.overlap_size[1] + pp, self.padded_size[1] - self.overlap_size[1] + pp),
                                           (self.overlap_size[2] + pp, self.padded_size[2] - self.overlap_size[2] + pp)),
                                mode=self.padding_mode) for image_np in images]
    for i in range(self.tiles_grid_size[0]):
        for j in range(self.tiles_grid_size[1]):
            for k in range(self.tiles_grid_size[2]):
                # (ri, rj, rk): per-tile flicker offset (random or from disp)
                if self.flicker_on:
                    if self.flicker_mode=='rand':
                        ri, rj, rk = [np.random.randint(-self.flicker_range,self.flicker_range) for _ in range(3)]
                    elif self.flicker_mode =='ensemble':
                        ri,rj,rk = disp
                else:
                    ri,rj,rk= 0, 0, 0
                # NOTE(review): flicker_on with an unknown flicker_mode leaves
                # ri/rj/rk unbound (NameError on the first tile)
                image_tile_temp_list = [image_padded[
                                        i * self.effective_size[0]+ri+pp:i * self.effective_size[0] + self.tile_size[0]+ri+pp,
                                        j * self.effective_size[1]+rj+pp:j * self.effective_size[1] + self.tile_size[1]+rj+pp,
                                        k * self.effective_size[2]+rk+pp:k * self.effective_size[2] + self.tile_size[2]+rk+pp]
                                        for image_padded in image_padded_list]
                image_tile_list.append(np.stack(image_tile_temp_list,0))
                start_coord_list.append((i * self.effective_size[0]+ri,j * self.effective_size[1]+rj,k * self.effective_size[2]+rk))
                if self.mode == 'eval':
                    seg_tile_temp = seg_padded[
                                    i * self.effective_size[0]+ri+pp:i * self.effective_size[0] + self.tile_size[0]+ri+pp,
                                    j * self.effective_size[1]+rj+pp:j * self.effective_size[1] + self.tile_size[1]+rj+pp,
                                    k * self.effective_size[2]+rk+pp:k * self.effective_size[2] + self.tile_size[2]+rk+pp]
                    seg_tile_list.append(np.expand_dims(seg_tile_temp, axis=0))
    # sample['image'] = np.stack(image_tile_list, 0)
    # sample['segmentation'] = np.stack(seg_tile_list, 0)
    trans_sample ={}
    trans_sample['image'] = np.stack(image_tile_list, 0) # N*C*xyz
    if 'label'in sample:
        if self.mode == 'pred':
            trans_sample['label'] = np.expand_dims(np.expand_dims(seg_np, axis=0), axis=0) #1*XYZ
        else:
            trans_sample['label'] = np.stack(seg_tile_list, 0) # N*1*xyz
    trans_sample['tile_size'] = self.tile_size
    trans_sample['overlap_size'] = self.overlap_size
    trans_sample['padding_mode'] = self.padding_mode
    trans_sample['flicker_on'] = self.flicker_on
    trans_sample['disp'] = disp
    trans_sample['num_crops_per_img'] = len(image_tile_list)
    trans_sample['start_coord_list'] = start_coord_list
    return trans_sample
def assemble(self, tiles,image_size=None, is_vote=False):
"""
Assembles segmentation of small patches into the original size
:param tiles: Nxhxdxw tensor contains N small patches of size hxdxw
:param is_vote:
:return: a segmentation information
"""
if image_size is not None:
self.image_size = image_size
self.effective_size = self.tile_size - self.overlap_size * 2 # size effective region of tiles after cropping
self.tiles_grid_size = np.ceil(self.image_size / self.effective_size).astype(int) # size of tiles grid
self.padded_size = self.effective_size * self.tiles_grid_size + self.overlap_size * 2 - self.image_size # size difference of padded image with original image
tiles = tiles.numpy()
if is_vote:
label_class = np.unique(tiles)
seg_vote_array = np.zeros(
np.insert(self.effective_size * self.tiles_grid_size + self.overlap_size * 2, 0, label_class.size),
dtype=int)
for i in range(self.tiles_grid_size[0]):
for j in range(self.tiles_grid_size[1]):
for k in range(self.tiles_grid_size[2]):
ind = i * self.tiles_grid_size[1] * self.tiles_grid_size[2] + j * self.tiles_grid_size[2] + k
for label in label_class:
local_ind = np.where(
tiles[ind] == label) # get the coordinates in local patch of each class
global_ind = (local_ind[0] + i * self.effective_size[0],
local_ind[1] + j * self.effective_size[1],
local_ind[2] + k * self.effective_size[2]) # transfer into global coordinates
seg_vote_array[label][global_ind] += 1 # vote for each glass
seg_reassemble = np.argmax(seg_vote_array, axis=0)[
self.overlap_size[0]:self.overlap_size[0] + self.image_size[0],
self.overlap_size[1]:self.overlap_size[1] + self.image_size[1],
self.overlap_size[2]:self.overlap_size[2] + self.image_size[2]].astype(np.uint8)
pass
else:
seg_reassemble = np.zeros(self.effective_size * self.tiles_grid_size)
for i in range(self.tiles_grid_size[0]):
for j in range(self.tiles_grid_size[1]):
for k in range(self.tiles_grid_size[2]):
ind = i * self.tiles_grid_size[1] * self.tiles_grid_size[2] + j * self.tiles_grid_size[2] + k
seg_reassemble[i * self.effective_size[0]:(i + 1) * self.effective_size[0],
j * self.effective_size[1]:(j + 1) * self.effective_size[1],
k * self.effective_size[2]:(k + 1) * self.effective_size[2]] = \
tiles[ind][self.overlap_size[0]:self.tile_size[0] - self.overlap_size[0],
self.overlap_size[1]:self.tile_size[1] - self.overlap_size[1],
self.overlap_size[2]:self.tile_size[2] - self.overlap_size[2]]
seg_reassemble = seg_reassemble[:self.image_size[0], :self.image_size[1], :self.image_size[2]]
# seg_image = sitk.GetImageFromArray(seg_reassemble)
# seg_image.CopyInformation(self.image)
seg_reassemble = np.expand_dims(np.expand_dims(seg_reassemble, axis=0), axis=0)
return seg_reassemble
def assemble_multi_torch(self, tiles,image_size=None):
"""
Assembles segmentation of small patches into the original size
:param tiles: Nxhxdxw tensor contains N small patches of size hxdxw
:param is_vote:
:return: a segmentation information
"""
import torch
if image_size is not None:
self.image_size = image_size
self.effective_size = self.tile_size - self.overlap_size * 2 # size effective region of tiles after cropping
self.tiles_grid_size = np.ceil(self.image_size / self.effective_size).astype(int) # size of tiles grid
self.padded_size = self.effective_size * self.tiles_grid_size + self.overlap_size * 2 - self.image_size # size difference of padded image with original image
seg_reassemble = torch.zeros([1,tiles.shape[1]]+list(self.effective_size * self.tiles_grid_size)).to(tiles.device)
for i in range(self.tiles_grid_size[0]):
for j in range(self.tiles_grid_size[1]):
for k in range(self.tiles_grid_size[2]):
ind = i * self.tiles_grid_size[1] * self.tiles_grid_size[2] + j * self.tiles_grid_size[2] + k
seg_reassemble[0,:,i * self.effective_size[0]:(i + 1) * self.effective_size[0],
j * self.effective_size[1]:(j + 1) * self.effective_size[1],
k * self.effective_size[2]:(k + 1) * self.effective_size[2]] = \
tiles[ind][:,self.overlap_size[0]:self.tile_size[0] - self.overlap_size[0],
self.overlap_size[1]:self.tile_size[1] - self.overlap_size[1],
self.overlap_size[2]:self.tile_size[2] - self.overlap_size[2]]
seg_reassemble = seg_reassemble[:,:,:self.image_size[0], :self.image_size[1], :self.image_size[2]]
# seg_image = sitk.GetImageFromArray(seg_reassemble)
# seg_image.CopyInformation(self.image)
return seg_reassemble
| 13,186 | 54.64135 | 181 | py |
easyreg | easyreg-master/data_pre/reg_preprocess_example/utils.py | import os
from easyreg.reg_data_utils import loading_img_list_from_files,generate_pair_name
local_path = "/playpen-raid/zyshen/oai_data"
server_path = "/pine/scr/z/y/zyshen/data/oai_data"
switcher = (local_path, server_path)
def server_switcher(f_path, switcher=("", "")):
    """Translate a path from one filesystem root to another.

    :param f_path: path string to translate
    :param switcher: (old_prefix, new_prefix) pair; with an empty old prefix
        the path is returned untouched
    :return: the translated path string
    """
    old_root, new_root = switcher
    return f_path.replace(old_root, new_root) if old_root else f_path
def split_input(original_txt_path):
    """Read a pair-list file and build per-pair path groups plus pair names.

    :param original_txt_path: txt file listing source/target image paths and,
        optionally, the corresponding label paths
    :return: (file_list, fname_list) where each file_list entry is
        [source, target] or [source, target, source_label, target_label] and
        fname_list holds the generated pair name for each entry
    """
    src_list, tgt_list, lsrc_list, ltgt_list = loading_img_list_from_files(original_txt_path)
    have_labels = lsrc_list is not None and ltgt_list is not None
    if have_labels:
        assert len(src_list) == len(lsrc_list)
        file_list = [list(group) for group in zip(src_list, tgt_list, lsrc_list, ltgt_list)]
    else:
        file_list = [list(group) for group in zip(src_list, tgt_list)]
    fname_list = [generate_pair_name([pair[0], pair[1]]) for pair in file_list]
    return file_list, fname_list
def print_txt(txt, output_path):
    """Write *txt* verbatim to *output_path*, overwriting any existing file."""
    with open(output_path, "w") as out_file:
        out_file.write(txt)
def printer(cmdl, name, output_path, mem=6, n_cpu=8, t=3):
    """Render a SLURM batch script around *cmdl* and write it to *output_path*.

    :param cmdl: command line executed via ``srun``
    :param name: base name used for the SLURM output log file
    :param output_path: where the generated ``.sh`` script is written
    :param mem: requested memory in GB
    :param n_cpu: requested number of CPU cores
    :param t: wall-clock limit in hours
    :return: *output_path*, unchanged
    """
    script = f"""#!/bin/bash
#SBATCH -p general
#SBATCH -N 1
#SBATCH --mem={mem}g
#SBATCH -n 1
#SBATCH -c {n_cpu}
#SBATCH --output={name}.txt
#SBATCH -t {t}:00:00
source activate torch4
cd /pine/scr/z/y/zyshen/reg_clean/demo
srun {cmdl}
"""
    print_txt(script, output_path)
    return output_path
def generate_slurm_txt(task_path_list, txt_output_folder):
    """Write a master ``slurm.sh`` that sbatch-submits every per-task script.

    :param task_path_list: file names of the generated per-task scripts
    :param txt_output_folder: local folder holding the scripts; its
        server-side counterpart is used inside the generated commands
    """
    server_folder = server_switcher(txt_output_folder, switcher)
    lines = ["#!/bin/bash"] + ["sbatch {}/{}".format(server_folder, task) for task in task_path_list]
    print_txt("\n".join(lines) + "\n", os.path.join(txt_output_folder, "slurm.sh"))
def get_cmdl(setting_folder_path, task_output_path, pair_path, gpu_id=0):
    """Build the demo_for_easyreg_eval.py command line for one image pair.

    :param setting_folder_path: folder holding the task settings
    :param task_output_path: output folder for this pair
    :param pair_path: [source, target, source_label, target_label] paths
    :param gpu_id: gpu index to pass through (-1 selects cpu mode)
    :return: the full command string
    """
    source, target, source_label, target_label = pair_path[0], pair_path[1], pair_path[2], pair_path[3]
    parts = [
        "python demo_for_easyreg_eval.py",
        "--setting_folder_path {}".format(setting_folder_path),
        "--task_output_path {}".format(task_output_path),
        "-s {}".format(source),
        "-t {}".format(target),
        "-ls {}".format(source_label),
        "-lt {}".format(target_label),
        "--gpu_id {}".format(gpu_id),
    ]
    return " ".join(parts)
def generate_pair_setting(setting_folder_path, task_output_folder, pair_path_list, pair_name_list, txt_output_folder, switcher=("", ""), mem=12, n_cpu=8, t=36, gpu_id=0):
    """Emit one SLURM script per registration pair and list the script names.

    Each pair's image/label paths are translated to their server-side
    locations, wrapped into an evaluation command and written as a batch
    script named ``<pair_name>.sh`` inside *txt_output_folder*.

    :return: list of the generated script file names
    """
    script_names = []
    for paths, name in zip(pair_path_list, pair_name_list):
        remote_paths = [server_switcher(p, switcher) for p in paths]
        command = get_cmdl(setting_folder_path, os.path.join(task_output_folder, name), remote_paths, gpu_id)
        script_name = name + ".sh"
        printer(command, name, os.path.join(txt_output_folder, script_name), mem, n_cpu, t)
        script_names.append(script_name)
    return script_names
def generate_inter_setting(task_type):
    """Generate SLURM submission scripts for inter-subject registration.

    Reads the inter-subject pair list, writes one batch script per pair
    (cpu mode, ``gpu_id=-1``) plus a master ``slurm.sh`` that submits them.

    :param task_type: registration method name, e.g. "ants" or "demons"
    """
    task_name = "{}_inter".format(task_type)
    settings_dir = os.path.join(server_path, "task_settings_for_full_resolution", task_type)
    pair_list_txt = os.path.join(local_path, "reg_debug_labeled_oai_reg_inter", "test", "pair_path_list.txt")
    scripts_dir = os.path.join(local_path, "sever_slurm", task_name)
    results_dir = os.path.join(server_path, "expri", task_name)
    os.makedirs(scripts_dir, exist_ok=True)
    pair_paths, pair_names = split_input(pair_list_txt)
    script_names = generate_pair_setting(settings_dir, results_dir, pair_paths, pair_names, scripts_dir, switcher=switcher, gpu_id=-1)
    generate_slurm_txt(script_names, scripts_dir)
def generate_atlas_setting(task_type):
    """Generate SLURM submission scripts for atlas-to-image registration.

    Mirrors :func:`generate_inter_setting` but reads the atlas pair list.
    Previously this function shadowed the module-level ``local_path`` and
    ``server_path`` with identical local copies; it now uses the module
    constants directly so a single edit updates every task generator.

    :param task_type: registration method name, e.g. "ants" or "demons"
    """
    task_name = "{}_atlas".format(task_type)
    setting_folder_path = os.path.join(server_path, "task_settings_for_full_resolution", task_type)
    pair_txt_path = os.path.join(local_path, "reg_test_for_atlas", "test", "pair_path_list.txt")
    txt_output_folder = os.path.join(local_path, "sever_slurm", task_name)
    task_output_folder = os.path.join(server_path, "expri", task_name)
    os.makedirs(txt_output_folder, exist_ok=True)
    file_list, fname_list = split_input(pair_txt_path)
    server_task_list = generate_pair_setting(setting_folder_path, task_output_folder, file_list, fname_list, txt_output_folder, switcher=switcher, gpu_id=-1)
    generate_slurm_txt(server_task_list, txt_output_folder)
# Generate submission scripts for every baseline registration method, for
# both the inter-subject and the atlas-to-image experiments.
task_types = ["ants","demons","nifty_reg"]
for task_type in task_types:
    generate_inter_setting(task_type)
    generate_atlas_setting(task_type)
| 4,748 | 41.026549 | 164 | py |
easyreg | easyreg-master/data_pre/reg_preprocess_example/stik_resize_vs_mermaid_resize.py | import numpy as np
import SimpleITK as sitk
from easyreg.utils import get_resampled_image
import torch
def resize_img(img, is_label=False,img_after_resize=None):
    """
    Resample a SimpleITK image onto a new voxel grid.

    :param img: SimpleITK image to resample
    :param is_label: if True use nearest-neighbor interpolation so label
        values are preserved; otherwise B-spline interpolation
    :param img_after_resize: desired output size in numpy (z, y, x) order;
        when None the current size is kept and the input is returned as-is
    :return: (resampled_image, resize_factor) with resize_factor being the
        per-axis output/input size ratio in (z, y, x) order
    """
    img_sz = img.GetSize()  # sitk size is (x, y, z)
    if img_after_resize is None:
        img_after_resize = np.flipud(img_sz)
    resize_factor = np.array(img_after_resize) / np.flipud(img_sz)
    # (size-1) ratio follows the align-corners spacing convention, matching
    # the mermaid resampler this script is compared against
    spacing_factor = (np.array(img_after_resize)-1) / (np.flipud(img_sz)-1)
    resize = not all([factor == 1 for factor in resize_factor])
    if resize:
        resampler = sitk.ResampleImageFilter()
        dimension = 3
        factor = np.flipud(resize_factor)  # back to sitk (x, y, z) order
        # BUGFIX: spacing_factor is in (z, y, x) order while the affine
        # matrix axes are sitk (x, y, z); it was previously indexed without
        # flipping, scaling the wrong axes for anisotropic inputs.
        spacing_factor_xyz = np.flipud(spacing_factor)
        affine = sitk.AffineTransform(dimension)
        matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
        after_size = [round(img_sz[i] * factor[i]) for i in range(dimension)]
        after_size = [int(sz) for sz in after_size]
        # the resampling transform maps output points back into input space,
        # hence the reciprocal scale on the diagonal
        matrix[0, 0] = 1. / spacing_factor_xyz[0]
        matrix[1, 1] = 1. / spacing_factor_xyz[1]
        matrix[2, 2] = 1. / spacing_factor_xyz[2]
        affine.SetMatrix(matrix.ravel())
        resampler.SetSize(after_size)
        resampler.SetTransform(affine)
        if is_label:
            resampler.SetInterpolator(sitk.sitkNearestNeighbor)
        else:
            resampler.SetInterpolator(sitk.sitkBSpline)
        img_resampled = resampler.Execute(img)
    else:
        img_resampled = img
    return img_resampled, resize_factor
def read_and_clean_itk_info(path):
    """Load an image and strip its itk meta data (origin/direction/spacing).

    :param path: image file path, or None
    :return: (image_without_meta_info, spacing, size) where spacing and size
        are flipped into numpy (z, y, x) order; (None, None, None) when
        *path* is None
    """
    if path is None:
        return None, None, None
    raw_img = sitk.ReadImage(path)
    spacing_zyx = np.flipud(raw_img.GetSpacing())
    size_zyx = np.flipud(raw_img.GetSize())
    # round-tripping through the array drops origin/direction/spacing
    cleaned = sitk.GetImageFromArray(sitk.GetArrayFromImage(raw_img))
    return cleaned, spacing_zyx, size_zyx
# Sanity check: resample the same lung CT volume to 160^3 once with SimpleITK
# (resize_img) and once with mermaid's resampler, report the summed voxel
# difference, and write both results out for visual inspection.
img_path = "/playpen-raid2/Data/Lung_Registration_clamp_normal_transposed/10056H/10056H_EXP_STD_NJC_COPD_img.nii.gz"
img_sitk, original_spacing, original_sz = read_and_clean_itk_info(img_path)
sampled_size = np.array([160,160,160])
img_np = sitk.GetArrayFromImage(img_sitk)
resized_img, resize_factor = resize_img(img_sitk,img_after_resize=sampled_size)
img_sitkresized_np = sitk.GetArrayFromImage(resized_img)
print(img_np.shape)
spacing = np.array([1,1,1])
img_merresized_img = get_resampled_image(torch.Tensor(img_np)[None][None], spacing,np.array([1,1,160,160,160]), 1, zero_boundary=False)
img_merresized_img = img_merresized_img.numpy().squeeze()
print((img_sitkresized_np- img_merresized_img).sum())  # aggregate discrepancy between the two resamplers
img_sitk = sitk.GetImageFromArray(img_sitkresized_np.astype(np.float32))
sitk.WriteImage(img_sitk,"/playpen-raid1/zyshen/debug/img_sitkresized_np.nii.gz")
img_sitk = sitk.GetImageFromArray(img_merresized_img.astype(np.float32))
sitk.WriteImage(img_sitk,"/playpen-raid1/zyshen/debug/img_merresized_img.nii.gz")
| 2,843 | 40.823529 | 135 | py |
easyreg | easyreg-master/data_pre/reg_preprocess_example/gen_from_brainstorm.py | """
Input txt for atlas to image , i.e. train txt atlas, image, atlas_label
folder for color image
folder for transformation
"""
import matplotlib as matplt
matplt.use('Agg')
import sys,os
os.environ["CUDA_VISIBLE_DEVICES"] = ''
sys.path.insert(0,os.path.abspath('.'))
sys.path.insert(0,os.path.abspath('..'))
sys.path.insert(0,os.path.abspath('../mermaid'))
from mermaid.model_evaluation import evaluate_model
import random
from mermaid.utils import *
from mermaid.data_utils import *
import numpy as np
import SimpleITK as sitk
import nibabel as nib
import copy
from glob import glob
def read_txt_into_list(file_path):
    """Parse a txt file into per-line path entries.

    Each line is split on commas / pipes / whitespace; 'None' tokens are
    dropped, and a line with a single remaining token is flattened to a
    bare string instead of a one-element list.

    :param file_path: text file to parse
    :return: list with one entry (string or list of strings) per line
    """
    import re
    splitter = re.compile(r'\s*[,|\s+]\s*')
    entries = []
    with open(file_path, 'r') as f:
        for line in f.read().splitlines():
            tokens = [tok for tok in splitter.split(line) if tok != 'None']
            entries.append(tokens[0] if len(tokens) == 1 else tokens)
    return entries
def gen_aug_from_brainstorm(color_path_list,trans_path_list, atlas_label_path, color_to_trans_switcher,output_folder,img_for_reference,input_0_1=True):
    """Randomly recombine appearance ("color") images with transformations.

    Per iteration, draws `batch` random color images and `batch` random
    transformation maps, warps each color image with its paired map and
    warps the atlas label with the same map, then writes both volumes to
    *output_folder* (num_aug = 1500 augmented samples in total). Everything
    is preloaded into memory up front.

    :param color_path_list: paths of the appearance (color) volumes
    :param trans_path_list: paths of the transformation (phi) maps
    :param atlas_label_path: path of the atlas segmentation volume
    :param color_to_trans_switcher: (color_suffix, trans_suffix) pair; the
        trans suffix is stripped from file names to build output names
    :param output_folder: where augmented images/labels are written
    :param img_for_reference: image providing the itk meta data when saving
    :param input_0_1: if False, color intensities are mapped from [-1,1] to [0,1]
    """
    color_name_list = [get_file_name(path) for path in color_path_list]
    trans_name_list = [get_file_name(path) for path in trans_path_list]
    name_list = [name.replace(color_to_trans_switcher[1],'') for name in trans_name_list]
    fr_sitk = lambda x:sitk.GetArrayFromImage(sitk.ReadImage(x))
    color_list = [fr_sitk(color_path)[None] for color_path in color_path_list]
    # reorder the 4-D map so the displacement channel comes first -- TODO
    # confirm the axis convention matches compute_warped_image_multiNC
    trans_path_list = [np.transpose(fr_sitk(trans_path),[3,2,1,0]) for trans_path in trans_path_list]
    color = torch.Tensor(np.stack(color_list))
    color = (color+1.0)/2 if not input_0_1 else color
    trans = torch.Tensor(np.stack(trans_path_list))
    atlas_label = torch.Tensor(fr_sitk(atlas_label_path))[None][None]
    spacing = 1./(np.array(color.shape[2:])-1)
    num_aug = 1500
    batch =10
    num_iter = int(num_aug/batch)
    index_list = list(range(len(color_name_list)))
    atlas_label = atlas_label.repeat(batch,1,1,1,1)
    for i in range(num_iter):
        index_trans = random.sample(index_list,batch)
        index_color = random.sample(index_list,batch)
        color_cur = color[index_color]
        trans_cur = trans[index_trans]
        # NOTE: the comprehension variable i shadows the loop counter; in
        # Python 3 the comprehension scope leaves the outer i intact
        img_names = [name_list[index_color[i]]+'_color_'+ name_list[index_trans[i]]+"_phi_image" for i in range(batch)]
        label_names = [name_list[index_color[i]]+'_color_'+ name_list[index_trans[i]]+"_phi_label" for i in range(batch)]
        warped_img = compute_warped_image_multiNC(color_cur,trans_cur,spacing,spline_order=1,zero_boundary=True)
        # spline_order=0 keeps label values crisp (nearest neighbor)
        label = compute_warped_image_multiNC(atlas_label,trans_cur,spacing,spline_order=0,zero_boundary=True)
        save_image_with_given_reference(warped_img,[img_for_reference]*batch,output_folder,img_names)
        save_image_with_given_reference(label,[img_for_reference]*batch,output_folder,label_names)
def gen_aug_from_brainstorm_not_read_in_memory(color_path_list,trans_path_list, atlas_label_path, color_to_trans_switcher,output_folder,img_for_reference,input_0_1=True):
    """Memory-light variant of gen_aug_from_brainstorm.

    Color volumes are preloaded, but transformation maps are read from disk
    only for the sampled batch, so arbitrarily many maps can be used without
    holding them all in memory (num_aug = 300 augmented samples in total).
    Parameters match :func:`gen_aug_from_brainstorm`.
    """
    color_name_list = [get_file_name(path) for path in color_path_list]
    trans_name_list = [get_file_name(path) for path in trans_path_list]
    name_list = [name.replace(color_to_trans_switcher[1],'') for name in trans_name_list]
    fr_sitk = lambda x:sitk.GetArrayFromImage(sitk.ReadImage(x))
    color_list = [fr_sitk(color_path)[None] for color_path in color_path_list]
    color = torch.Tensor(np.stack(color_list))
    color = (color+1.0)/2 if not input_0_1 else color
    atlas_label = torch.Tensor(fr_sitk(atlas_label_path))[None][None]
    spacing = 1./(np.array(color.shape[2:])-1)
    num_aug = 300
    batch =10
    num_iter = int(num_aug/batch)
    index_color_list = list(range(len(color_name_list)))
    index_trans_list = list(range(len(trans_name_list)))
    atlas_label = atlas_label.repeat(batch,1,1,1,1)
    for i in range(num_iter):
        index_trans = random.sample(index_trans_list,batch)
        index_color = random.sample(index_color_list,batch)
        # transformation maps are loaded lazily, only for the sampled batch
        trans_list = [np.transpose(fr_sitk(trans_path_list[i]), [3, 2, 1, 0]) for i in index_trans]
        trans_cur = torch.Tensor(np.stack(trans_list))
        color_cur = color[index_color]
        img_names = [name_list[index_color[i]]+'_color_'+ trans_name_list[index_trans[i]]+"_phi_image" for i in range(batch)]
        label_names = [name_list[index_color[i]]+'_color_'+ trans_name_list[index_trans[i]]+"_phi_label" for i in range(batch)]
        warped_img = compute_warped_image_multiNC(color_cur,trans_cur,spacing,spline_order=1,zero_boundary=True)
        # spline_order=0 keeps label values crisp (nearest neighbor)
        label = compute_warped_image_multiNC(atlas_label,trans_cur,spacing,spline_order=0,zero_boundary=True)
        save_image_with_given_reference(warped_img,[img_for_reference]*batch,output_folder,img_names)
        save_image_with_given_reference(label,[img_for_reference]*batch,output_folder,label_names)
# color_folder = "/playpen-raid/zyshen/data/oai_reg/brainstorm/color_lrfix_res/reg/res/records/3D"
# trans_folder = "/playpen-raid/zyshen/data/oai_reg/brainstorm/trans_lrfix_res/reg/res/records"
# color_type="_atlas_image_test_iter_0_warped_test_iter_0_warped.nii.gz"
# color_to_trans_switcher = ("atlas_image_test_iter_0_warped_test_iter_0_warped","phi")
# output_folder ="/playpen-raid1/zyshen/data/oai_reg/brain_storm/data_aug_fake_img_disp"
# atlas_label_path ="/playpen-raid/zyshen/data/oai_seg/atlas_label.nii.gz"
# img_for_reference ="/playpen-raid/zyshen/data/oai_seg/atlas_image.nii.gz"
# os.makedirs(output_folder,exist_ok=True)
# color_path_list = glob(os.path.join(color_folder,"*"+color_type))
# color_name_list = [get_file_name(path) for path in color_path_list]
# trans_path_list = [os.path.join(trans_folder,color_name.replace(*color_to_trans_switcher)+'.nii.gz') for color_name in color_name_list]
# gen_aug_from_brainstorm(color_path_list,trans_path_list, atlas_label_path, color_to_trans_switcher,output_folder,img_for_reference, input_0_1=False)
def inverse_phi_name(fname):
    """Swap the two halves of a pairwise phi file name.

    e.g. 'a_b_c_d_phi' -> 'c_d_a_b_phi'; only the first four '_'-separated
    components are used and a '_phi' suffix is always appended.
    """
    parts = fname.split("_")
    return "_".join([parts[2], parts[3], parts[0], parts[1], "phi"])
# atlas_pair = "/playpen-raid1/zyshen/data/oai_seg/atlas/atlas/atlas_to.txt"
# color_folder = "/playpen-raid1/zyshen/data/oai_seg/atlas/atlas/test_sm/reg/res/records/3D"
# pair_list = read_txt_into_list(atlas_pair)
# img_name_list = [get_file_name(pair[1]) for pair in pair_list]
# color_path_list = [os.path.join(color_folder,fname + "_atlas_image_test_iter_0_warped.nii.gz") for fname in img_name_list]
# color_name_list = [get_file_name(path) for path in color_path_list]
# trans_folder = "/playpen-raid/zyshen/data/oai_reg/brainstorm/trans_lrfix_res/reg/res/records"
# color_to_trans_switcher = ("test_iter_0_warped","phi")
# trans_path_list = [os.path.join(trans_folder,inverse_phi_name(color_name.replace(*color_to_trans_switcher))+'.nii.gz') for color_name in color_name_list]
# output_folder ="/playpen-raid1/zyshen/data/oai_reg/brain_storm/data_aug_real_img_disp"
# atlas_label_path ="/playpen-raid/zyshen/data/oai_seg/atlas_label.nii.gz"
# img_for_reference ="/playpen-raid/zyshen/data/oai_seg/atlas_image.nii.gz"
# os.makedirs(output_folder,exist_ok=True)
# gen_aug_from_brainstorm(color_path_list,trans_path_list, atlas_label_path, color_to_trans_switcher,output_folder,img_for_reference)
#
# atlas_pair = "/playpen-raid1/zyshen/data/oai_seg/atlas/atlas/atlas_to.txt"
# color_folder = "/playpen-raid1/zyshen/data/oai_seg/atlas/atlas/test_sm/reg/res/records/3D"
# pair_list = read_txt_into_list(atlas_pair)
# img_name_list = [get_file_name(pair[1]) for pair in pair_list]
# color_path_list = [os.path.join(color_folder,fname + "_atlas_image_test_iter_0_warped.nii.gz") for fname in img_name_list]
# trans_folder = "/playpen-raid1/zyshen/data/oai_reg/brain_storm/data_aug_fluid_sr"
# color_to_trans_switcher = ("test_iter_0_warped","phi_map")
# trans_path_list =glob(os.path.join(trans_folder,'*'+'phi_map.nii.gz'))
# output_folder ="/playpen-raid1/zyshen/data/oai_reg/brain_storm/data_aug_real_img_fluid_sr"
# atlas_label_path ="/playpen-raid/zyshen/data/oai_seg/atlas_label.nii.gz"
# img_for_reference ="/playpen-raid/zyshen/data/oai_seg/atlas_image.nii.gz"
# os.makedirs(output_folder,exist_ok=True)
# gen_aug_from_brainstorm_not_read_in_memory(color_path_list,trans_path_list, atlas_label_path, color_to_trans_switcher,output_folder,img_for_reference)
#
# Active configuration: augment by pairing brainstorm-generated appearance
# ("color") images with fluid super-resolution transformation maps; output
# goes to data_aug_fake_img_fluid_sr. The commented blocks in this file are
# alternative color/transform source configurations.
color_folder = "/playpen-raid/zyshen/data/oai_reg/brainstorm/color_lrfix_res/reg/res/records/3D"
color_type="_atlas_image_test_iter_0_warped_test_iter_0_warped.nii.gz"
color_path_list = glob(os.path.join(color_folder,"*"+color_type))
trans_folder = "/playpen-raid1/zyshen/data/oai_reg/brain_storm/data_aug_fluid_sr"
color_to_trans_switcher = ("test_iter_0_warped","phi_map")
trans_path_list =glob(os.path.join(trans_folder,'*'+'phi_map.nii.gz'))
output_folder ="/playpen-raid1/zyshen/data/oai_reg/brain_storm/data_aug_fake_img_fluid_sr"
atlas_label_path ="/playpen-raid/zyshen/data/oai_seg/atlas_label.nii.gz"
img_for_reference ="/playpen-raid/zyshen/data/oai_seg/atlas_image.nii.gz"
os.makedirs(output_folder,exist_ok=True)
gen_aug_from_brainstorm_not_read_in_memory(color_path_list,trans_path_list, atlas_label_path, color_to_trans_switcher,output_folder,img_for_reference,input_0_1=False)
# color_folder = "/playpen-raid/zyshen/data/oai_reg/brainstorm/color_lrfix_res/reg/res/records/3D"
# trans_folder = "/playpen-raid/zyshen/data/oai_seg/atlas/phi_train"
# color_type="_atlas_image_test_iter_0_warped_test_iter_0_warped.nii.gz"
# color_to_trans_switcher = ("atlas_image_test_iter_0_warped_test_iter_0_warped","phi")
# output_folder ="/playpen-raid1/zyshen/data/oai_reg/brain_storm/data_aug_fake_img_fluidt1"
# atlas_label_path ="/playpen-raid/zyshen/data/oai_seg/atlas_label.nii.gz"
# img_for_reference ="/playpen-raid/zyshen/data/oai_seg/atlas_image.nii.gz"
# os.makedirs(output_folder,exist_ok=True)
# color_path_list = glob(os.path.join(color_folder,"*"+color_type))
# color_name_list = [get_file_name(path) for path in color_path_list]
# trans_path_list = [os.path.join(trans_folder,color_name.replace(*color_to_trans_switcher)+'.nii.gz') for color_name in color_name_list]
# gen_aug_from_brainstorm(color_path_list,trans_path_list, atlas_label_path, color_to_trans_switcher,output_folder,img_for_reference, input_0_1=False)
#
#
# atlas_pair = "/playpen-raid1/zyshen/data/oai_seg/atlas/atlas/atlas_to.txt"
# color_folder = "/playpen-raid1/zyshen/data/oai_seg/atlas/atlas/test_sm/reg/res/records/3D"
# pair_list = read_txt_into_list(atlas_pair)
# img_name_list = [get_file_name(pair[1]) for pair in pair_list]
# color_path_list = [os.path.join(color_folder,fname + "_atlas_image_test_iter_0_warped.nii.gz") for fname in img_name_list]
# color_name_list = [get_file_name(path) for path in color_path_list]
# trans_folder = "/playpen-raid/zyshen/data/oai_seg/atlas/phi_train"
# color_to_trans_switcher = ("test_iter_0_warped","phi")
# trans_path_list = [os.path.join(trans_folder,inverse_phi_name(color_name.replace(*color_to_trans_switcher))+'.nii.gz') for color_name in color_name_list]
# output_folder ="/playpen-raid1/zyshen/data/oai_reg/brain_storm/data_aug_real_img_fluidt1"
# atlas_label_path ="/playpen-raid/zyshen/data/oai_seg/atlas_label.nii.gz"
# img_for_reference ="/playpen-raid/zyshen/data/oai_seg/atlas_image.nii.gz"
# os.makedirs(output_folder,exist_ok=True)
# gen_aug_from_brainstorm(color_path_list,trans_path_list, atlas_label_path, color_to_trans_switcher,output_folder,img_for_reference)
#
| 11,645 | 57.818182 | 170 | py |
easyreg | easyreg-master/data_pre/reg_preprocess_example/get_atlas_label.py | import os
import SimpleITK as sitk
from mermaid.utils import compute_warped_image_multiNC
from easyreg.reg_data_utils import *
from tools.image_rescale import save_image_with_given_reference
from glob import glob
import numpy as np
import torch
from easyreg.metrics import *
from functools import reduce
def make_one_hot(labels, C=2):
    '''
    Converts an integer label tensor to a one-hot encoding.

    Parameters
    ----------
    labels : torch.LongTensor
        N x 1 x D x H x W volume of integer class labels; each value is an
        integer in [0, C) identifying the class of that voxel.
    C : integer.
        number of classes in labels.

    Returns
    -------
    target : torch.FloatTensor
        N x C x D x H x W one-hot encoding of `labels`.
    '''
    one_hot = torch.zeros(labels.size(0), C, labels.size(2), labels.size(3), labels.size(4))
    # scatter a 1 along the channel axis at each voxel's class index
    target = one_hot.scatter_(1, labels, 1)
    return target
def compute_atlas_label(lsource_folder_path, to_atlas_folder_pth,atlas_type, atlas_to_l_switcher,output_folder):
    """Build an atlas segmentation by majority voting over warped labels.

    Collects (at most 100) image-to-atlas transformation maps, warps every
    subject's one-hot label volume into atlas space, sums the warped one-hot
    maps over subjects and takes the arg-max class per voxel. The voted
    label volume is saved as "atlas_label" in *output_folder*.

    :param lsource_folder_path: folder holding the subject label volumes
    :param to_atlas_folder_pth: folder holding the to-atlas phi maps
    :param atlas_type: file-name suffix identifying the phi maps
    :param atlas_to_l_switcher: (old, new) substring pair mapping a phi file
        name to the matching label file name
    :param output_folder: where the voted atlas label is written
    """
    to_atlas_pth_list = glob(os.path.join(to_atlas_folder_pth,"*"+atlas_type))[:100]  # cap at 100 subjects
    to_atlas_name_list = [get_file_name(to_atlas_pth) for to_atlas_pth in to_atlas_pth_list]
    l_pth_list = [os.path.join(lsource_folder_path,name.replace(*atlas_to_l_switcher)+'.nii.gz') for name in to_atlas_name_list]
    fr_sitk = lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))
    l_list = [fr_sitk(pth)[None] for pth in l_pth_list]
    # reorder the map axes for compute_warped_image_multiNC -- TODO confirm
    # this matches the on-disk axis convention of the phi maps
    to_atlas_list = [np.transpose(fr_sitk(pth)) for pth in to_atlas_pth_list]
    l = np.stack(l_list).astype(np.int64)
    num_c = len(np.unique(l))
    to_atlas = np.stack(to_atlas_list)
    l= torch.LongTensor(l)
    to_atlas = torch.Tensor(to_atlas)
    l_onehot = make_one_hot(l,C=num_c)
    spacing = 1./(np.array(l.shape[2:])-1)
    l_onehot = l_onehot.to(torch.float32)
    # trilinear warp of the one-hot channels yields fractional votes per class
    warped_one_hot = compute_warped_image_multiNC(l_onehot,to_atlas,spacing = spacing, spline_order=1,zero_boundary=True)
    sum_one_hot = torch.sum(warped_one_hot,0,keepdim=True)
    voting = torch.max(torch.Tensor(sum_one_hot), 1)[1][None].to(torch.float32)  # arg-max over the class channel
    save_image_with_given_reference(voting,[l_pth_list[0]],output_folder,["atlas_label"])
#
#
# atlas_type = "_atlas_image_phi.nii.gz"
# atlas_to_l_switcher = ("image_atlas_image_phi","masks")
# lsource_folder_path ="/playpen-raid/olut/Nifti_resampled_rescaled_2Left_Affine2atlas"
# to_atlas_folder_pth ="/playpen-raid1/zyshen/data/oai_seg/atlas/atlas/test_sm/reg/res/records"
# output_folder = "/playpen-raid/zyshen/data/oai_seg/atlas"
# compute_atlas_label(lsource_folder_path, to_atlas_folder_pth,atlas_type, atlas_to_l_switcher,output_folder)
def compute_reg_baseline(l_path_list, atlas_label_path, l_phi_path_list):
    """Evaluate an atlas-based registration baseline with the Dice score.

    Warps the atlas label into each subject's space with the given
    transformation maps (spline_order=0, i.e. nearest neighbor), compares
    the result against each subject's ground-truth label and prints the
    mean Dice over the non-background classes.

    :param l_path_list: paths of the ground-truth subject label volumes
    :param atlas_label_path: path of the atlas label volume
    :param l_phi_path_list: paths of the atlas-to-subject phi maps
    """
    fr_sitk = lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))
    l_list = [fr_sitk(pth)[None] for pth in l_path_list]
    atlas_label = [fr_sitk(atlas_label_path)[None]]*len(l_list)  # one copy per subject
    to_atlas_list = [np.transpose(fr_sitk(pth)) for pth in l_phi_path_list]
    atlas_label = torch.Tensor(np.stack(atlas_label))
    l = torch.Tensor(np.stack(l_list))
    to_atlas = np.stack(to_atlas_list)
    to_atlas = torch.Tensor(to_atlas)
    spacing = 1./(np.array(l.shape[2:])-1)
    warped_label = compute_warped_image_multiNC(atlas_label,to_atlas,spacing = spacing, spline_order=0,zero_boundary=True)
    res = get_multi_metric(warped_label,l)
    # row 0 is the batch average; class 0 (background) is excluded from the mean
    average_dice, detailed_dice = np.mean(res['batch_avg_res']['dice'][0, 1:]), res['batch_avg_res']['dice']
    print(average_dice)
# Evaluate the voted atlas label against every test subject: collect the
# ground-truth label paths and the matching atlas->subject phi maps, then
# report the mean Dice of the warped atlas label.
test_path_list = "/playpen-raid/zyshen/data/oai_seg/test/file_path_list.txt"
atlas_label_path = '/playpen-raid/zyshen/data/oai_seg/atlas_label.nii.gz'
img_label_list = read_txt_into_list(test_path_list)
label_path_list = [pair[1] for pair in img_label_list]
l_name_list = [get_file_name(label_path).replace("_masks","") for label_path in label_path_list]
atlas_to_folder_pth ="/playpen-raid1/zyshen/data/oai_seg/atlas/atlas/test_sm/reg/res/records"
#atlas_to_folder_pth ="/playpen-raid/zyshen/data/oai_seg/atlas/test/reg/res/records"
l_phi_list = [os.path.join(atlas_to_folder_pth,'atlas_image_'+name+"_image_phi.nii.gz") for name in l_name_list]
compute_reg_baseline(label_path_list, atlas_label_path, l_phi_list)
| 4,342 | 45.698925 | 128 | py |
easyreg | easyreg-master/data_pre/reg_preprocess_example/dirlab_eval.py | import os
import json
import numpy as np
import torch
import SimpleITK as sitk
import pyvista as pv
import mermaid.utils as py_utils
from data_pre.reg_preprocess_example.vtk_utils import save_vtk
from tools.visual_tools import save_3D_img_from_numpy
# DIR-Lab COPDgene case identifiers used throughout this evaluation script.
COPD_ID=[
        "copd_000001",
        "copd_000002",
        "copd_000003",
        "copd_000004",
        "copd_000005",
        "copd_000006",
        "copd_000007",
        "copd_000008",
        "copd_000009",
        "copd_000010"
]
# Per-case acquisition meta data (voxel grid size, spacing, physical origin)
# for the inspiration ("insp") and expiration ("exp") scans of each case.
COPD_info = {"copd_000001": {"insp":{'size': [512, 512, 482],'spacing': [0.625, 0.625, 0.625], 'origin': [-148.0, -145.0, -310.625]},
                             "exp":{'size': [512, 512, 473],'spacing': [0.625, 0.625, 0.625], 'origin': [-148.0, -145.0, -305.0]}},
             "copd_000002": {"insp":{'size': [512, 512, 406],'spacing': [0.644531, 0.644531, 0.625], 'origin': [-176.9, -165.0, -254.625]},
                             "exp":{'size': [512, 512, 378],'spacing': [0.644531, 0.644531, 0.625], 'origin': [-177.0, -165.0, -237.125]}},
             "copd_000003": {"insp":{'size': [512, 512, 502],'spacing': [0.652344, 0.652344, 0.625], 'origin': [-149.4, -167.0, -343.125]},
                             "exp":{'size': [512, 512, 464],'spacing': [0.652344, 0.652344, 0.625], 'origin': [-149.4, -167.0, -319.375]}},
             "copd_000004": {"insp":{'size': [512, 512, 501],'spacing': [0.589844, 0.589844, 0.625], 'origin': [-124.1, -151.0, -308.25]},
                             "exp":{'size': [512, 512, 461],'spacing': [0.589844, 0.589844, 0.625], 'origin': [-124.1, -151.0, -283.25]}},
             "copd_000005": {"insp":{'size': [512, 512, 522],'spacing': [0.646484, 0.646484, 0.625], 'origin': [-145.9, -175.9, -353.875]},
                             "exp":{'size': [512, 512, 522],'spacing': [0.646484, 0.646484, 0.625], 'origin': [-145.9, -175.9, -353.875]}},
             "copd_000006": {"insp":{'size': [512, 512, 474],'spacing': [0.632812, 0.632812, 0.625], 'origin': [-158.4, -162.0, -299.625]},
                             "exp":{'size': [512, 512, 461],'spacing': [0.632812, 0.632812, 0.625], 'origin': [-158.4, -162.0, -291.5]}},
             "copd_000007": {"insp":{'size': [512, 512, 446],'spacing': [0.625, 0.625, 0.625], 'origin': [-150.7, -160.0, -301.375]},
                             "exp":{'size': [512, 512, 407],'spacing': [0.625, 0.625, 0.625], 'origin': [-151.0, -160.0, -284.25]}},
             "copd_000008": {"insp":{'size': [512, 512, 458],'spacing': [0.585938, 0.585938, 0.625], 'origin': [-142.3, -147.4, -313.625]},
                             "exp":{'size': [512, 512, 426],'spacing': [0.585938, 0.585938, 0.625], 'origin': [-142.3, -147.4, -294.625]}},
             "copd_000009": {"insp":{'size': [512, 512, 461],'spacing': [0.664062, 0.664062, 0.625], 'origin': [-156.1, -170.0, -310.25]},
                             "exp":{'size': [512, 512, 380],'spacing': [0.664062, 0.664062, 0.625], 'origin': [-156.1, -170.0, -259.625]}},
             "copd_000010": {"insp":{'size': [512, 512, 535],'spacing': [0.742188, 0.742188, 0.625], 'origin': [-189.0, -176.0, -355.0]},
                             "exp":{'size': [512, 512, 539],'spacing': [0.742188, 0.742188, 0.625], 'origin': [-189.0, -176.0, -346.25]}}
             }
# resampled_image_json_info = "/playpen-raid1/zyshen/data/lung_resample_350/dirlab_350_sampled.json"
# dirlab_folder_path = "/playpen-raid1/zyshen/data/lung_resample_350/landmarks"
# org_image_sz = np.array([350,350,350]) #physical spacing equals to 1
# with open(resampled_image_json_info) as f:
# img_info = json.load(f)
#
#
# def read_index(index_path):
# index = np.load(index_path)
# return flip_coord(index)
#
# def get_spacing_and_origin(case_id):
# return np.flip(img_info[case_id]["spacing"]), np.flip(img_info[case_id]["origin"])
#
# def flip_coord(coord):
# coord_flip = coord.copy()
# coord_flip[:,0] = coord[:,2]
# coord_flip[:,2] = coord[:,0]
# return coord_flip
#
# def transfer_into_itk_coord(points):
# return flip_coord(points)
#
# def get_landmark(pair_name, resampled_image_sz):
# case_id = pair_name.split("_")
# case_id =case_id[0]+"_"+case_id[1]
# slandmark_index_path = os.path.join(dirlab_folder_path,case_id+"_EXP_index.npy")
# tlandmark_index_path = os.path.join(dirlab_folder_path,case_id+"_INSP_index.npy")
# slandmark_index = read_index(slandmark_index_path)
# tlandmark_index = read_index(tlandmark_index_path)
# slandmark_index = slandmark_index*(resampled_image_sz-1)/(org_image_sz-1)
# tlandmark_index = tlandmark_index*(resampled_image_sz-1)/(org_image_sz-1)
# s_spacing, s_origin = get_spacing_and_origin(case_id+"_EXP")
# t_spacing, t_origin= get_spacing_and_origin(case_id+"_INSP")
# assert (s_spacing == t_spacing).all()
# physical_spacing = (org_image_sz-1)*s_spacing/(resampled_image_sz-1) # we assume source and target has the same spacing
# return slandmark_index, tlandmark_index, physical_spacing, s_origin, t_origin
#
#
# def eval_on_dirlab_per_case(forward_map,inv_map, pair_name,moving, target,record_path):
# transform_shape = np.array(forward_map.shape[2:])
# slandmark_index,tlandmark_index, physical_spacing, s_origin, t_origin = get_landmark(pair_name,transform_shape)
# spacing = 1./(transform_shape-1)
# slandmark_img_coord = spacing*slandmark_index
# tlandmark_img_coord = spacing*tlandmark_index
# slandmark_physical = physical_spacing*slandmark_index+s_origin
# tlandmark_physical = physical_spacing*tlandmark_index+t_origin
# # target = target.squeeze().clone()
# # for coord in tlandmark_index:
# # coord_int = [int(c) for c in coord]
# # target[coord_int[0],coord_int[1],coord_int[2]] = 10.
# # save_3D_img_from_numpy(target.detach().cpu().numpy().squeeze(),"/playpen-raid2/zyshen/debug/{}_debug.nii.gz".format(pair_name))
#
#
# tlandmark_img_coord_reshape = torch.Tensor(tlandmark_img_coord.transpose(1,0)).view([1,3,-1,1,1])
# tlandmark_img_coord_reshape = tlandmark_img_coord_reshape.to(forward_map.device)
# ts_landmark_img_coord = py_utils.compute_warped_image_multiNC(forward_map, tlandmark_img_coord_reshape*2-1, spacing, 1, zero_boundary=False,use_01_input=False)
# ts_landmark_img_coord = ts_landmark_img_coord.squeeze().transpose(1,0).detach().cpu().numpy()
# diff_ts = (slandmark_img_coord - ts_landmark_img_coord)/spacing*physical_spacing
#
#
# # target = target.squeeze().clone()
# # for coord in ts_landmark_img_coord:
# # coord_int = [int(c) for c in coord/spacing]
# # target[coord_int[0],coord_int[1],coord_int[2]] = 10.
# # save_3D_img_from_numpy(target.detach().cpu().numpy().squeeze(),"/playpen-raid2/zyshen/debug/{}_debug_warped.nii.gz".format(pair_name))
#
# slandmark_img_coord_reshape = torch.Tensor(slandmark_img_coord.transpose(1, 0)).view([1, 3, -1, 1, 1])
# slandmark_img_coord_reshape = slandmark_img_coord_reshape.to(inv_map.device)
# st_landmark_img_coord = py_utils.compute_warped_image_multiNC(inv_map, slandmark_img_coord_reshape * 2 - 1,
# spacing, 1, zero_boundary=False, use_01_input=False)
# st_landmark_img_coord = st_landmark_img_coord.squeeze().transpose(1, 0).detach().cpu().numpy()
# landmark_saving_folder = os.path.join(record_path,"landmarks")
# os.makedirs(landmark_saving_folder, exist_ok=True)
# save_vtk(os.path.join(landmark_saving_folder,"{}_source.vtk".format(pair_name)),{"points":transfer_into_itk_coord(slandmark_physical)})
# save_vtk(os.path.join(landmark_saving_folder,"{}_target.vtk".format(pair_name)),{"points":transfer_into_itk_coord(tlandmark_physical)})
# save_vtk(os.path.join(landmark_saving_folder,"{}_target_warp_to_source.vtk".format(pair_name)),{"points":transfer_into_itk_coord(ts_landmark_img_coord / spacing*physical_spacing+s_origin)})
# save_vtk(os.path.join(landmark_saving_folder,"{}_source_warp_to_target.vtk".format(pair_name)),{"points":transfer_into_itk_coord(st_landmark_img_coord / spacing*physical_spacing+t_origin)})
# diff_st = (tlandmark_img_coord - st_landmark_img_coord) / spacing * physical_spacing
#
# return np.linalg.norm(diff_ts,ord=2,axis=1).mean(), np.linalg.norm(diff_st,ord=2,axis=1).mean()
#
def evaluate_on_dirlab(inv_map,dirlab_id,moving_itk, target_itk,record_path):
    """Evaluate a registration on one DIRLAB COPD case via landmark error.

    Warps the expiration landmarks of the case through ``inv_map`` and
    reports the mean L2 distance to the inspiration landmarks.

    :param inv_map: inverse transformation map (3xXxYxZ tensor, values
        presumably normalized to [0,1] -- TODO confirm with producer).
    :param dirlab_id: dataset case name, e.g. "copd_000001".
    :param moving_itk: moving (expiration) image as a SimpleITK image.
    :param target_itk: target (inspiration) image as a SimpleITK image.
    :param record_path: output folder; currently unused in this function.
    :return: mean post-registration landmark error (physical units).
    """
    # dataset case name -> DIRLAB scan id (used to locate landmark files)
    MAPPING = {
        "copd_000006" : "12042G" ,
        "copd_000007" : "12105E" ,
        "copd_000008" : "12109M" ,
        "copd_000009" : "12239Z" ,
        "copd_000010" : "12829U" ,
        "copd_000001" : "13216S" ,
        "copd_000002" : "13528L" ,
        "copd_000003" : "13671Q" ,
        "copd_000004" : "13998W" ,
        "copd_000005" : "17441T"
    }
    # the ten DIRLAB COPD cases this evaluation supports
    COPD_ID = [
        "copd_000001",
        "copd_000002",
        "copd_000003",
        "copd_000004",
        "copd_000005",
        "copd_000006",
        "copd_000007",
        "copd_000008",
        "copd_000009",
        "copd_000010"
    ]
    def get_dirlab_landmark(case_id):
        # Load the expiration / inspiration landmark point sets of the case.
        exp_landmark_path = os.path.join("./lung_reg/landmarks", MAPPING[case_id] + "_EXP_STD_USD_COPD.vtk")
        insp_landmark_path = os.path.join("./lung_reg/landmarks", MAPPING[case_id] + "_INSP_STD_USD_COPD.vtk")
        exp_landmark = read_vtk(exp_landmark_path)["points"]
        insp_landmark = read_vtk(insp_landmark_path)["points"]
        return exp_landmark, insp_landmark
    def read_vtk(path):
        # Read a vtk mesh into a dict of numpy arrays (points, faces, extras).
        data = pv.read(path)
        data_dict = {}
        data_dict["points"] = data.points.astype(np.float32)
        data_dict["faces"] = data.faces.reshape(-1, 4)[:, 1:].astype(np.int32)
        for name in data.array_names:
            try:
                data_dict[name] = data[name]
            except:
                pass
        return data_dict
    def warp_points(points, inv_map, case_id):
        """
        Warp physical landmark points through the inverse transformation map.

        In easyreg the inverse transform lives in normalized [0,1] coordinates,
        so the points are first converted from physical to normalized voxel
        coordinates, sampled through the map, and then mapped back into the
        target image's physical space.
        The transformation map is in reversed voxel/physical order
        (width, height, depth) while the points are in standard itk order.
        :return: warped points in the target image's physical space
        """
        import numpy as np
        import torch.nn.functional as F
        # first make everything in voxel coordinate, depth, height, width
        phi_sz = np.array(inv_map.shape[1:])
        moving_img = moving_itk
        moving_spacing = moving_img.GetSpacing()
        moving_spacing = np.array(moving_spacing)
        moving_origin = moving_img.GetOrigin()
        moving_origin = np.array(moving_origin)
        img_sz = moving_img.GetSize()
        # NOTE(review): unlike the sibling implementation in hack.py this
        # spacing is not flipped -- fine for cubic volumes, confirm otherwise.
        standard_spacing = 1 / (np.array(img_sz) - 1)
        target_img = target_itk
        target_spacing = target_img.GetSpacing()
        target_spacing = np.array(target_spacing)
        target_origin = target_img.GetOrigin()
        target_origin = np.array(target_origin)
        # physical -> index -> normalized [0,1] -> grid_sample [-1,1]
        points = (points - moving_origin) / moving_spacing * standard_spacing
        points = points * 2 - 1
        grid_sz = [1] + [points.shape[0]] + [1, 1, 3]  # 1*N*1*1*3
        grid = points.reshape(*grid_sz)
        grid = torch.Tensor(grid).cuda()
        inv_map_sz = [1, 3] + list(phi_sz)  # width, height, depth
        inv_map = inv_map.view(*inv_map_sz)  # 1*3*X*Y*Z
        points_wraped = F.grid_sample(inv_map, grid, mode='bilinear', padding_mode='border',
                                      align_corners=True)  # 1*3*N*1*1
        points_wraped = points_wraped.detach().cpu().numpy()
        points_wraped = np.transpose(np.squeeze(points_wraped))
        # flip back to itk axis order and convert to target physical space
        points_wraped = np.flip(points_wraped, 1) / standard_spacing * target_spacing + target_origin
        warp = sitk.GetArrayFromImage(target_img)
        wlandmark_index = (points_wraped - target_origin) / target_spacing
        for coord in wlandmark_index:
            coord_int = [int(c) for c in coord]
            warp[coord_int[2], coord_int[1], coord_int[0]] = 2.
        # NOTE(review): unconditional debug dump to a hard-coded path.
        save_3D_img_from_numpy(warp, "/playpen-raid2/zyshen/debug/{}_debug.nii.gz".format(dirlab_id+"warp"))
        return points_wraped
    # Fix: the original formatted COPD_ID.keys() -- COPD_ID is a list, which
    # has no .keys(), so a failing assert raised AttributeError instead.
    assert dirlab_id in COPD_ID, "{} doesn't belong to ten dirlab cases:{}".format(dirlab_id, COPD_ID)
    exp_landmark, insp_landmark = get_dirlab_landmark(dirlab_id)
    warped_landmark = warp_points(exp_landmark, inv_map,dirlab_id)
    diff = warped_landmark - insp_landmark
    diff_norm = np.linalg.norm(diff, ord=2, axis=1)
    print("before register landmark error norm: {}".format(
        np.linalg.norm(exp_landmark - insp_landmark, ord=2, axis=1).mean()))
    print("after register landmark error norm: {}".format(diff_norm.mean()))
    return diff_norm.mean()
def eval_on_dirlab(forward_map,inverse_map, pair_name_list,pair_path_list, moving, target, record_path=None):
    """Evaluate landmark errors in both directions for a batch of DIRLAB pairs.

    :param forward_map: iterable of forward (source->target) transformation maps.
    :param inverse_map: iterable of inverse (target->source) transformation maps.
    :param pair_name_list: iterable of case/pair names.
    :param pair_path_list: [source_path_list, target_path_list].
    :param moving: unused in this function.
    :param target: unused in this function.
    :param record_path: folder forwarded to ``evaluate_on_dirlab``.

    NOTE(review): ``COPD_ID`` is referenced here but in this chunk it is only
    defined locally inside ``evaluate_on_dirlab`` -- confirm a module-level
    ``COPD_ID`` exists earlier in the file, otherwise this raises NameError.
    """
    diff_ts, diff_st = [], []
    for _forward_map, _inv_map, pair_name,s_pth, t_pth in zip(forward_map,inverse_map,pair_name_list,pair_path_list[0],pair_path_list[1]):
        source_sitk = sitk.ReadImage(s_pth)
        target_sitk = sitk.ReadImage(t_pth)
        # only the ten DIRLAB COPD cases have landmark sets; skip the rest
        if pair_name in COPD_ID:
            _diff_st = evaluate_on_dirlab(_inv_map,pair_name,source_sitk,target_sitk, record_path)
            _diff_ts = evaluate_on_dirlab(_forward_map,pair_name,target_sitk,source_sitk, record_path)
            diff_ts.append(_diff_ts), diff_st.append(_diff_st)
    print("dirlab landmark source to target error:{}:{}".format(pair_name_list,diff_st))
    print("dirlab landmark target_to source error:{}:{}".format(pair_name_list,diff_ts))
| 14,618 | 52.746324 | 195 | py |
easyreg | easyreg-master/demo/hack.py | import os
import numpy as np
import torch
import SimpleITK as sitk
import pyvista as pv
from easyreg.net_utils import gen_identity_map
from tools.image_rescale import permute_trans
from tools.module_parameters import ParameterDict
from easyreg.multiscale_net_improved import Multiscale_FlowNet as model
from easyreg.utils import resample_image, get_transform_with_itk_format, dfield2bspline
from tools.visual_tools import save_3D_img_from_numpy, save_3D_img_from_itk
import mermaid.utils as py_utils
#dirlab_landmarks_folder = "/playpen-raid1/zyshen/lung_reg/evaluate/dirlab_landmarks"
dirlab_landmarks_folder = "./lung_reg/landmarks"
def resize_img(img, img_after_resize=None, is_mask=False):
    """
    Resize a SimpleITK image to a requested voxel size.

    :param img: SimpleITK image input.
    :param img_after_resize: desired size after resize; it is compared against
        the flipped itk size below, so it is presumably given in numpy
        (z, y, x) order -- TODO confirm with callers. If None, no resize.
    :param is_mask: if True, resample with nearest neighbor so label values
        are preserved.
    :return: (resized SimpleITK image, per-axis resize factor)
    """
    img_sz = img.GetSize()
    if img_after_resize is not None:
        img_after_resize = img_after_resize
    else:
        img_after_resize = img_sz
    # resize_factor is the ratio of voxel counts; spacing_factor uses (n-1)
    # so the first and last voxel centers stay aligned after resampling.
    resize_factor = np.array(img_after_resize) / np.flipud(img_sz)
    spacing_factor = (np.array(img_after_resize) - 1) / (np.flipud(img_sz) - 1)
    resize = not all([factor == 1 for factor in resize_factor])
    if resize:
        resampler = sitk.ResampleImageFilter()
        dimension = 3
        factor = np.flipud(resize_factor)  # back to itk (x, y, z) order
        affine = sitk.AffineTransform(dimension)
        matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
        after_size = [round(img_sz[i] * factor[i]) for i in range(dimension)]
        after_size = [int(sz) for sz in after_size]
        # scaling transform mapping output coordinates back onto the input grid
        # NOTE(review): spacing_factor is indexed in flipped order while
        # after_size uses itk order; fine for cubic targets -- confirm otherwise.
        matrix[0, 0] = 1. / spacing_factor[0]
        matrix[1, 1] = 1. / spacing_factor[1]
        matrix[2, 2] = 1. / spacing_factor[2]
        affine.SetMatrix(matrix.ravel())
        resampler.SetSize(after_size)
        resampler.SetTransform(affine)
        if is_mask:
            resampler.SetInterpolator(sitk.sitkNearestNeighbor)
        else:
            resampler.SetInterpolator(sitk.sitkLinear)
        img_resampled = resampler.Execute(img)
    else:
        img_resampled = img
    return img_resampled, resize_factor
def resample_image_itk_by_spacing_and_size(
    image,
    output_spacing,
    output_size,
    output_type=None,
    interpolator=sitk.sitkBSpline,
    padding_value=-1024,
    center_padding=True,
):
    """
    Image resampling using ITK
    :param image: simpleITK image
    :param output_spacing: numpy array or tuple. Output spacing
    :param output_size: numpy array or tuple. Output size
    :param output_type: simpleITK output data type. If None, use the same as 'image'
    :param interpolator: simpleITK interpolator (default: BSpline)
    :param padding_value: pixel padding value when a transformed pixel is outside of the image
    :param center_padding: if True, shift the output origin so that the
        original image content is centered in the (padded) output volume
    :return: resampled simpleITK image
    """
    resampler = sitk.ResampleImageFilter()
    resampler.SetOutputDirection(image.GetDirection())
    resampler.SetSize(output_size)
    resampler.SetDefaultPixelValue(padding_value)
    resampler.SetInterpolator(interpolator)
    resampler.SetOutputSpacing(np.array(output_spacing))
    resampler.SetOutputPixelType(
        output_type if output_type is not None else image.GetPixelIDValue()
    )
    # per-axis ratio between input and output spacing
    factor = np.asarray(image.GetSpacing()) / np.asarray(output_spacing).astype(
        np.float32
    )
    # Get new output origin
    if center_padding:
        # physical extent the input occupies in the output grid, rounded up
        real_output_size = np.round(
            np.asarray(image.GetSize()) * factor + 0.0005
        ).astype(np.uint32)
        # split the leftover physical size evenly on both sides
        diff = ((output_size - real_output_size) * np.asarray(output_spacing)) / 2
        output_origin = np.asarray(image.GetOrigin()) - diff
        # output_origin = output_origin - np.asarray(image.GetSpacing()) / 2 \
        #                 + output_spacing / 2
    else:
        output_origin = np.asarray(image.GetOrigin())
    resampler.SetOutputOrigin(output_origin)
    return resampler.Execute(image)
def normalize_img(img, is_mask=False, min_hu=-1000., max_hu=-200., mask_cutoff=400):
    """
    Normalize a CT intensity image to [0, 1], or binarize a mask.

    Generalized from the original hard-coded HU window [-1000, -200]; the
    defaults reproduce the original behavior exactly.

    :param img: numpy image; NOTE it is clamped/binarized in place.
    :param is_mask: if True, treat ``img`` as a label map and binarize it.
    :param min_hu: lower clamp of the intensity window (maps to 0).
    :param max_hu: upper clamp of the intensity window (maps to 1).
    :param mask_cutoff: label values above this are zeroed before binarizing.
    :return: normalized image (new array) or binarized mask (same array).
    """
    if not is_mask:
        # clamp in place (same side effect as the original boolean assignments)
        np.clip(img, min_hu, max_hu, out=img)
        return (img - min_hu) / (max_hu - min_hu)
    img[img > mask_cutoff] = 0
    img[img != 0] = 1
    return img
def preprocess(img_sitk,is_mask=False):
    """Resample to 1mm isotropic 350^3, normalize, and return a new image
    carrying the resampled geometry (origin/spacing/direction)."""
    if is_mask:
        interp, pad = sitk.sitkNearestNeighbor, 0
    else:
        interp, pad = sitk.sitkBSpline, -1000
    resampled = resample_image_itk_by_spacing_and_size(
        img_sitk,
        output_spacing=np.array([1., 1., 1.]),
        output_size=[350, 350, 350],
        output_type=None,
        interpolator=interp,
        padding_value=pad,
        center_padding=True,
    )
    arr = sitk.GetArrayFromImage(resampled).astype(np.float32)
    arr = normalize_img(arr, is_mask)
    out_img = sitk.GetImageFromArray(arr)
    # carry over the geometry of the resampled image
    out_img.SetOrigin(resampled.GetOrigin())
    out_img.SetSpacing(resampled.GetSpacing())
    out_img.SetDirection(resampled.GetDirection())
    return out_img
def convert_itk_to_support_deepnet(img_sitk, is_mask=False,device=torch.device("cuda:0")):
    """Resize to 256^3, rescale image intensities from [0,1] to [-1,1]
    (masks are left untouched) and return a 1x1xDxHxW float tensor on ``device``."""
    target_sz = [256] * 3
    # drop origin/spacing/direction by round-tripping through numpy
    stripped = sitk.GetImageFromArray(sitk.GetArrayFromImage(img_sitk))
    resized, _ = resize_img(stripped, target_sz, is_mask=is_mask)
    arr = sitk.GetArrayFromImage(resized)
    if not is_mask:
        arr = arr * 2 - 1
    tensor = torch.Tensor(arr.astype(np.float32))
    return tensor[None][None].to(device)
def identity_map(sz, spacing, dtype='float32'):
    """
    Returns an identity map.

    Simplified from the original: the scaled ``np.mgrid`` already has the
    desired dim x (spatial) shape and dtype, so the element-wise copy into a
    freshly zeroed array (and the duplicated dimension checks) were redundant.

    :param sz: just the spatial dimensions, i.e., XxYxZ
    :param spacing: list with spacing information [sx,sy,sz]
    :param dtype: numpy data-type ('float32', 'float64', ...)
    :return: identity map of dimension dim x X x Y x Z with values in
        [0, (sz-1)*spacing] per dimension
    :raises ValueError: if the dimension is not 1, 2, or 3
    """
    dim = len(sz)
    if dim == 1:
        id = np.mgrid[0:sz[0]]
    elif dim == 2:
        id = np.mgrid[0:sz[0], 0:sz[1]]
    elif dim == 3:
        id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
    else:
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    id = np.array(id.astype(dtype))
    if dim == 1:
        id = id.reshape(1, sz[0])  # add a dummy first index
    # scale each dimension into range [0, (sz-1)*spacing]
    for d in range(dim):
        id[d] *= spacing[d]
    return id
def convert_transform_into_itk_bspline(transform,spacing,moving_ref,target_ref):
    """Convert a displacement map into an ITK displacement-field transform and
    a fitted BSpline transform.

    :param transform: 1x3xXxYxZ displacement map (torch tensor or numpy array)
        in the normalized coordinate given by ``spacing``.
    :param spacing: spacing of the normalized coordinate the transform lives in.
    :param moving_ref: SimpleITK reference image of the moving side.
    :param target_ref: SimpleITK reference image of the target side.
    :return: (displacement-field transform, fitted BSpline transform)
    """
    if type(transform) == torch.Tensor:
        transform = transform.detach().cpu().numpy()
    cur_trans = transform[0]
    img_sz = np.array(transform.shape[2:])
    # geometry of both reference images
    moving_spacing_ref = moving_ref.GetSpacing()
    moving_direc_ref = moving_ref.GetDirection()
    moving_orig_ref = moving_ref.GetOrigin()
    target_spacing_ref = target_ref.GetSpacing()
    target_direc_ref = target_ref.GetDirection()
    target_orig_ref = target_ref.GetOrigin()
    # identity maps in physical spacing (itk spacing flipped to numpy order)
    id_np_moving = identity_map(img_sz, np.flipud(moving_spacing_ref))
    id_np_target = identity_map(img_sz, np.flipud(target_spacing_ref))
    # rescale the displacement from normalized units into physical units
    factor = np.flipud(moving_spacing_ref) / spacing
    factor = factor.reshape(3,1,1,1)
    moving_direc_matrix = np.array(moving_direc_ref).reshape(3, 3)
    target_direc_matrix = np.array(target_direc_ref).reshape(3, 3)
    # physical displacement = direction-rotated warped positions minus
    # direction-rotated identity positions of the target grid
    cur_trans = np.matmul(moving_direc_matrix, permute_trans(id_np_moving + cur_trans * factor).reshape(3, -1)) \
                - np.matmul(target_direc_matrix, permute_trans(id_np_target).reshape(3, -1))
    cur_trans = cur_trans.reshape(id_np_moving.shape)
    # account for the origin offset between the two images
    bias = np.array(target_orig_ref)-np.array(moving_orig_ref)
    bias = -bias.reshape(3,1,1,1)
    transform_physic = cur_trans +bias
    disp_itk = get_transform_with_itk_format(transform_physic,target_spacing_ref, target_orig_ref,target_direc_ref)
    #sitk.WriteTransform(trans, saving_path)
    # Retrive the DField from the Transform
    dfield = disp_itk.GetDisplacementField()
    # Fitting a BSpline from the Deformation Field
    bstx = dfield2bspline(dfield, verbose=True)
    return disp_itk, bstx
def convert_output_into_itk_support_format(source_itk,target_itk, l_source_itk, l_target_itk, phi,spacing):
    """Upsample a network transform to full image resolution and warp the
    source image and (optionally) its label map.

    :param source_itk: full-resolution source image (SimpleITK) or None.
    :param target_itk: full-resolution target image (SimpleITK) or None.
    :param l_source_itk: source label map (SimpleITK) or None.
    :param l_target_itk: target label map (SimpleITK) or None.
    :param phi: transformation map from the network, in [-1, 1] coordinates.
    :param spacing: spacing of ``phi``'s low-resolution grid.
    :return: (full-res phi, warped image, warped label, new spacing,
        disp_itk, bspline_itk) -- the last two are currently always None.
    """
    phi = (phi+1)/2 # here we assume the phi take the [-1,1] coordinate, usually used by deep network
    new_phi = None
    warped = None
    l_warped = None
    new_spacing = None
    if source_itk is not None:
        s = sitk.GetArrayFromImage(source_itk)
        t = sitk.GetArrayFromImage(target_itk)
        sz_t = [1, 1] + list(t.shape)
        source = torch.from_numpy(s[None][None]).to(phi.device)
        # upsample phi to the target's full resolution, then warp the source
        new_phi, new_spacing = resample_image(phi, spacing, sz_t, 1, zero_boundary=True)
        warped = py_utils.compute_warped_image_multiNC(source, new_phi, new_spacing, 1, zero_boundary=True)
    if l_source_itk is not None:
        ls = sitk.GetArrayFromImage(l_source_itk)
        lt = sitk.GetArrayFromImage(l_target_itk)
        sz_lt = [1, 1] + list(lt.shape)
        l_source = torch.from_numpy(ls[None][None]).to(phi.device)
        if new_phi is None:
            new_phi, new_spacing = resample_image(phi, spacing, sz_lt, 1, zero_boundary=True)
        # 0th-order (nearest) interpolation to keep label values intact
        l_warped = py_utils.compute_warped_image_multiNC(l_source, new_phi, new_spacing, 0, zero_boundary=True)
    # NOTE(review): if source_itk is None, ``warped`` is still None here and
    # ``warped.shape`` below raises -- confirm callers always pass source_itk.
    id_map = gen_identity_map(warped.shape[2:], resize_factor=1., normalized=True).cuda()
    id_map = (id_map[None] + 1) / 2.
    disp = new_phi - id_map
    # itk displacement/bspline conversion is currently disabled
    disp_itk, bspline_itk = None, None# convert_transform_into_itk_bspline(disp,new_spacing,source_itk, target_itk)
    return new_phi, warped,l_warped, new_spacing, disp_itk, bspline_itk
def predict(source_itk, target_itk,source_mask_itk=None, target_mask_itk=None, model_path="",device=torch.device("cuda:0")):
    """Run the registration network in both directions and return the
    full-resolution composed and affine transformation maps.

    :param source_itk: preprocessed source image (SimpleITK).
    :param target_itk: preprocessed target image (SimpleITK).
    :param source_mask_itk: optional preprocessed source mask.
    :param target_mask_itk: optional preprocessed target mask.
    :param model_path: path of the pretrained network weights.
    :param device: torch device to run on.
    :return: dict of numpy transformation maps and (currently None) itk
        bspline transforms.
        NOTE(review): the key "afffine_inv_bspline" is misspelled; it is a
        runtime key so it is left unchanged -- confirm with consumers.
    """
    setting_path = "./lung_reg/task_setting.json"
    opt = ParameterDict()
    opt.load_JSON(setting_path)
    # convert everything into 1x1x256^3 tensors the network expects
    source = convert_itk_to_support_deepnet(source_itk,device=device)
    target = convert_itk_to_support_deepnet(target_itk, device=device)
    source_mask = convert_itk_to_support_deepnet(source_mask_itk,is_mask=True) if source_mask_itk is not None else None
    target_mask = convert_itk_to_support_deepnet(target_mask_itk,is_mask=True) if target_mask_itk is not None else None
    network = model(img_sz=[256,256,256],opt=opt)
    network.load_pretrained_model(model_path)
    network.to(device)
    network.train(False)
    with torch.no_grad():
        # run both directions so forward and inverse maps are available
        warped, composed_map, affine_map = network.forward(source, target, source_mask, target_mask)
        inv_warped, composed_inv_map, inv_affine_map = network.forward(target, source, target_mask, source_mask)
        spacing = 1./(np.array(warped.shape[2:])-1)
    del network
    full_composed_map, full_warped,l_full_warped, _,disp_itk, bspline_itk = convert_output_into_itk_support_format(source_itk,target_itk, source_mask_itk, target_mask_itk, composed_map,spacing)
    full_composed_map = full_composed_map.detach().cpu().squeeze().numpy()
    full_inv_composed_map, full_inv_warped,l_full_inv_warped, _,inv_disp_itk, inv_bspline_itk = convert_output_into_itk_support_format(target_itk,source_itk, target_mask_itk, source_mask_itk, composed_inv_map,spacing)
    full_inv_composed_map = full_inv_composed_map.detach().cpu().squeeze().numpy()
    # NOTE(review): the mask argument order below differs from the composed-map
    # calls above (target mask passed as the source label) -- confirm intended.
    full_affine_map, full_affined, l_full_affined, _,affine_disp_itk, affine_bspline_itk = convert_output_into_itk_support_format(source_itk,target_itk,target_mask_itk,source_mask_itk, affine_map,spacing)
    full_affine_map = full_affine_map.detach().cpu().squeeze().numpy()
    full_inv_affine_map, full_inv_affined, l_full_inv_affined, _,affine_inv_disp_itk, affine_inv_bspline_itk = convert_output_into_itk_support_format(target_itk, source_itk, target_mask_itk, source_mask_itk, inv_affine_map, spacing)
    full_inv_affine_map = full_inv_affine_map.detach().cpu().squeeze().numpy()
    # from easyreg.demons_utils import sitk_grid_sampling
    # save_3D_img_from_itk(sitk_grid_sampling(target_itk,source_itk, disp_itk),
    #                      "/playpen-raid1/zyshen/debug/debug_lin_model_st_itk.nii.gz")
    # save_3D_img_from_itk(sitk_grid_sampling(source_itk, target_itk, inv_disp_itk),
    #                      "/playpen-raid1/zyshen/debug/debug_lin_model_ts_itk.nii.gz")
    # save_3D_img_from_itk(source_itk, "/playpen-raid1/zyshen/debug/debug_lin_model_source_itk.nii.gz")
    # save_3D_img_from_itk(target_itk, "/playpen-raid1/zyshen/debug/debug_lin_model_target_itk.nii.gz")
    # save_3D_img_from_numpy(full_warped.squeeze().cpu().numpy(),
    #                        "/playpen-raid1/zyshen/debug/debug_lin_model_st.nii.gz",
    #                        target_itk.GetSpacing(), target_itk.GetOrigin(), target_itk.GetDirection())
    # save_3D_img_from_numpy(full_inv_warped.squeeze().cpu().numpy(),"/playpen-raid1/zyshen/debug/debug_lin_model_ts.nii.gz",
    #                        source_itk.GetSpacing(), source_itk.GetOrigin(), source_itk.GetDirection())
    return {"phi": full_composed_map, "inv_phi": full_inv_composed_map,"bspline":bspline_itk, "inv_bspline":inv_bspline_itk,
            "affine_phi": full_affine_map, "affine_inv_phi": full_inv_affine_map,"affine_bspline":affine_bspline_itk, "afffine_inv_bspline":affine_inv_bspline_itk}
# DIRLAB scan id -> dataset case name.
MAPPING = {
    "12042G": "copd_000006",
    "12105E": "copd_000007",
    "12109M": "copd_000008",
    "12239Z": "copd_000009",
    "12829U": "copd_000010",
    "13216S": "copd_000001",
    "13528L": "copd_000002",
    "13671Q": "copd_000003",
    "13998W": "copd_000004",
    "17441T": "copd_000005"
}
# COPD case number (as a string, "1".."10") -> DIRLAB scan id.
INV_MAPPING = {
    "6": "12042G",
    "7": "12105E",
    "8": "12109M",
    "9": "12239Z",
    "10": "12829U",
    "1": "13216S",
    "2": "13528L",
    "3": "13671Q",
    "4": "13998W",
    "5": "17441T"
}
def evaluate_on_dirlab(inv_map,moving_itk, target_itk,dirlab_id):
    """Evaluate a registration on one DIRLAB COPD case via landmark error.

    Warps the expiration landmarks through ``inv_map`` and prints the mean
    L2 distance to the inspiration landmarks before and after registration.

    Fix: the function now also *returns* the post-registration mean error,
    consistent with the sibling implementation of the same name elsewhere in
    this repository; existing callers that ignore the return are unaffected.

    :param inv_map: inverse transformation map (3xXxYxZ, values in [0,1]).
    :param moving_itk: moving (expiration) SimpleITK image.
    :param target_itk: target (inspiration) SimpleITK image.
    :param dirlab_id: DIRLAB scan id, e.g. "12042G" (a key of ``MAPPING``).
    :return: mean post-registration landmark error in physical units.
    """
    def get_dirlab_landmark(case_id):
        # Load the expiration / inspiration landmark point sets of the case.
        exp_landmark_path = os.path.join(dirlab_landmarks_folder, case_id + "_EXP_STD_USD_COPD.vtk")
        insp_landmark_path = os.path.join(dirlab_landmarks_folder, case_id + "_INSP_STD_USD_COPD.vtk")
        exp_landmark = read_vtk(exp_landmark_path)["points"]
        insp_landmark = read_vtk(insp_landmark_path)["points"]
        return exp_landmark, insp_landmark
    def read_vtk(path):
        # Read a vtk mesh into a dict of numpy arrays (points, faces, extras).
        data = pv.read(path)
        data_dict = {}
        data_dict["points"] = data.points.astype(np.float32)
        data_dict["faces"] = data.faces.reshape(-1, 4)[:, 1:].astype(np.int32)
        for name in data.array_names:
            try:
                data_dict[name] = data[name]
            except:
                pass
        return data_dict
    def warp_points(points, inv_map, moving_itk, target_itk):
        """
        Warp physical landmark points through the inverse transformation map.

        In easyreg the inverse transform lives in normalized [0,1] coordinates,
        so the points are first converted from physical to normalized voxel
        coordinates, sampled through the map, and then mapped back into the
        target image's physical space.
        The transformation map is in reversed voxel/physical order
        (width, height, depth) while the points are in standard itk order
        (depth, height, width).
        :return: warped points in the target image's physical space
        """
        import numpy as np
        import torch.nn.functional as F
        # first make everything in voxel coordinate, depth, height, width
        img_sz = np.array(inv_map.shape[1:])
        standard_spacing = 1 / (img_sz - 1)  # width, height, depth
        standard_spacing = np.flipud(standard_spacing)  # depth, height, width
        moving_img = moving_itk
        moving_spacing = moving_img.GetSpacing()
        moving_spacing = np.array(moving_spacing)
        moving_origin = moving_img.GetOrigin()
        moving_origin = np.array(moving_origin)
        target_img = target_itk
        target_spacing = target_img.GetSpacing()
        target_spacing = np.array(target_spacing)
        target_origin = target_img.GetOrigin()
        target_origin = np.array(target_origin)
        # debug aid: mark landmark voxels in a local copy of the moving image
        moving = sitk.GetArrayFromImage(moving_img)
        slandmark_index = (points-moving_origin) / moving_spacing
        for coord in slandmark_index:
            coord_int = [int(c) for c in coord]
            moving[coord_int[2],coord_int[1],coord_int[0]] = 2.
        # physical -> index -> normalized [0,1] -> grid_sample [-1,1]
        points = (points - moving_origin) / moving_spacing * standard_spacing
        points = points * 2 - 1
        grid_sz = [1] + [points.shape[0]] + [1, 1, 3]  # 1*N*1*1*3
        grid = points.reshape(*grid_sz)
        grid = torch.Tensor(grid).cuda()
        inv_map = torch.Tensor(inv_map).cuda()
        inv_map_sz = [1, 3] + list(img_sz)  # width,height, depth
        inv_map = inv_map.view(*inv_map_sz)  # 1*3*X*Y*Z
        points_wraped = F.grid_sample(inv_map, grid, mode='bilinear', padding_mode='border',
                                      align_corners=True)  # 1*3*N*1*1
        points_wraped = points_wraped.detach().cpu().numpy()
        points_wraped = np.transpose(np.squeeze(points_wraped))
        # flip back to itk axis order and convert to target physical space
        points_wraped = np.flip(points_wraped, 1) / standard_spacing * target_spacing + target_origin
        # debug aid: mark warped landmark voxels in a local copy of the target
        warp = sitk.GetArrayFromImage(target_img)
        wlandmark_index = (points_wraped - target_origin) / target_spacing
        for coord in wlandmark_index:
            coord_int = [int(c) for c in coord]
            warp[coord_int[2], coord_int[1], coord_int[0]] = 2.
        return points_wraped
    assert dirlab_id in MAPPING, "{} doesn't belong to ten dirlab cases:{}".format(dirlab_id, MAPPING.keys())
    exp_landmark, insp_landmark = get_dirlab_landmark(dirlab_id)
    warped_landmark = warp_points(exp_landmark, inv_map, moving_itk, target_itk)
    diff = warped_landmark - insp_landmark
    diff_norm = np.linalg.norm(diff, ord=2, axis=1)
    print("before register landmark error norm: {}".format(
        np.linalg.norm(exp_landmark - insp_landmark, ord=2, axis=1).mean()))
    print("after register landmark error norm: {}".format(diff_norm.mean()))
    return diff_norm.mean()
def get_file_name(img_path):
    """Return the base file name of ``img_path`` stripped of all extensions
    (e.g. '/a/b/case.nii.gz' -> 'case')."""
    base_name = os.path.basename(img_path)
    return base_name.split(".")[0]
if __name__ == "__main__":
    # Run preprocessing, network prediction and DIRLAB landmark evaluation
    # for all ten COPD cases.
    # NOTE(review): input paths below are hard-coded for a specific cluster.
    for case_id in range(1,11):
        case_id = str(case_id)
        case_name = INV_MAPPING[case_id]
        source_path = "/playpen-raid1/Data/DIRLABCasesHighRes/{}_EXP_STD_USD_COPD.nrrd".format(case_name)
        target_path = "/playpen-raid1/Data/DIRLABCasesHighRes/{}_INSP_STD_USD_COPD.nrrd".format(case_name)
        source_mask_path = "/playpen-raid1/lin.tian/data/raw/DIRLABCasesHighRes/copd{}/copd{}_EXP_label.nrrd".format(case_id,case_id)
        target_mask_path = "/playpen-raid1/lin.tian/data/raw/DIRLABCasesHighRes/copd{}/copd{}_INSP_label.nrrd".format(case_id,case_id)
        model_path = "./lung_reg/model_256"
        source_itk = sitk.ReadImage(source_path)
        target_itk = sitk.ReadImage(target_path)
        # masks are optional; an empty path string disables them
        source_mask_itk = sitk.ReadImage(source_mask_path) if len(source_mask_path) else None
        target_mask_itk = sitk.ReadImage(target_mask_path) if len(target_mask_path) else None
        preprocessed_source_itk = preprocess(source_itk)
        preprocessed_target_itk = preprocess(target_itk)
        #sitk.WriteImage(preprocessed_source_itk,"/playpen-raid1/zyshen/debug/12042G_preprocessed.nii.gz")
        if source_mask_itk is not None and target_mask_itk is not None:
            preprocessed_source_mask_itk = preprocess(source_mask_itk,is_mask=True)
            preprocessed_target_mask_itk = preprocess(target_mask_itk,is_mask=True)
        else:
            preprocessed_source_mask_itk = None
            preprocessed_target_mask_itk = None
        output_dict = predict(preprocessed_source_itk, preprocessed_target_itk,preprocessed_source_mask_itk,preprocessed_target_mask_itk,
                              model_path=model_path)
        # the DIRLAB scan id is the first "_"-separated token of the file name
        dirlab_id = get_file_name(source_path).split("_")[0]
        evaluate_on_dirlab(output_dict["inv_phi"], preprocessed_source_itk, preprocessed_target_itk, dirlab_id)
easyreg | easyreg-master/demo/demo_for_data_aug.py | """
A demo for fluid-based data augmentation.
"""
import matplotlib as matplt
import subprocess
matplt.use('Agg')
import os, sys, time
import torch
torch.backends.cudnn.benchmark=True
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../easy_reg'))
# sys.path.insert(0,os.path.abspath('../mermaid'))
import tools.module_parameters as pars
from easyreg.aug_utils import *
def generate_txt_for_registration(file_txt,name_txt, txt_format,output_path,pair_num_limit=-1,per_num_limit=-1):
    """Build the registration pair-list and name-list txt files.

    Dispatches on ``txt_format``: "aug_by_file" pairs images across the lines
    of the input txt; any other value ("aug_by_line") pairs within each line.
    Returns (pair_list_txt, pair_name_list).
    """
    if txt_format == "aug_by_file":
        builder = get_pair_list_txt_by_file
    else:
        builder = get_pair_list_txt_by_line
    return builder(file_txt, name_txt, output_path, pair_num_limit, per_num_limit)
def init_reg_env(args):
    """Prepare the registration stage: resolve paths/settings (demo or user
    supplied) and generate the pair/name txt files.

    :param args: parsed command-line arguments; ``args.task_output_path`` is
        overwritten when running a demo.
    :return: (setting_folder_path, pair_list_txt, name_list_txt)
    """
    task_output_path = args.task_output_path
    run_demo = args.run_demo
    name_txt = args.name_txt
    if run_demo:
        # demos ship their settings/inputs under fixed relative folders
        demo_name = args.demo_name
        setting_folder_path = os.path.join('./demo_settings/data_aug', demo_name)
        task_output_path = os.path.join('./data_aug_demo_output', demo_name)
        args.task_output_path = task_output_path
        file_txt = os.path.join(task_output_path,"input.txt")
        txt_format = "aug_by_line" if demo_name=="opt_lddmm_lpba" else "aug_by_file"
    else:
        txt_format = args.txt_format
        file_txt = args.file_txt
        setting_folder_path = args.setting_folder_path
    os.makedirs(task_output_path, exist_ok=True)
    output_pair_list_txt, output_name_list_txt = generate_txt_for_registration(file_txt,name_txt, txt_format, task_output_path,args.max_size_of_pair_to_reg,args.max_size_of_target_set_to_reg)
    return setting_folder_path, output_pair_list_txt, output_name_list_txt
def do_registration(txt_path, name_path, setting_folder_path,output_path,gpu_id_list):
    """Run ``demo_for_easyreg_eval.py`` over the pair list, one subprocess
    per GPU; blocks until all subprocesses finish.

    :param txt_path: txt listing the registration pairs.
    :param name_path: txt listing the corresponding pair names.
    :param setting_folder_path: folder holding the task settings.
    :param output_path: output folder (also receives the split txt files).
    :param gpu_id_list: gpu ids; one subprocess is launched per id.
    """
    if len(gpu_id_list)==1:
        # single GPU: run the whole list in one subprocess
        cmd = "python demo_for_easyreg_eval.py "
        cmd +="-ts={} -txt={} -pntxt={} -o={} -g={}".format(setting_folder_path,txt_path,name_path,output_path,int(gpu_id_list[0]))
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
    else:
        # multi GPU: split the pair/name lists and launch one process per split
        num_split = len(gpu_id_list)
        num_split = split_txt(txt_path, num_split, output_path, "p")
        split_txt(name_path, num_split, output_path, "pn")
        sub_txt_path_list = [os.path.join(output_path, 'p{}.txt'.format(i)) for i in range(num_split)]
        sub_name_path_list = [os.path.join(output_path, 'pn{}.txt'.format(i)) for i in range(num_split)]
        processes = []
        for i in range(num_split):
            cmd = "echo GPU {} \n".format(gpu_id_list[i])
            cmd += "python demo_for_easyreg_eval.py "
            cmd += "-ts={} -txt={} -pntxt={} -o={} -g={}\n".format(setting_folder_path, sub_txt_path_list[i],sub_name_path_list[i], output_path, int(gpu_id_list[i]))
            p = subprocess.Popen(cmd, shell=True)
            processes.append(p)
            # stagger the launches to avoid initialization contention
            time.sleep(60)
        exit_codes = [p.wait() for p in processes]
def init_aug_env(reg_pair_list_txt,reg_name_list_txt,task_output_path,setting_folder_path):
    """Prepare the augmentation stage from the registration outputs.

    Creates the aug output folder, updates the data-aug setting json with the
    registration result folders when needed, and generates the
    moving/momentum input txt files for ``gen_aug_samples.py``.

    :return: (aug_input_txt, aug_name_txt, aug_output_path)
    """
    aug_output_path = os.path.join(task_output_path, "aug")
    os.makedirs(aug_output_path,exist_ok=True)
    aug_setting_path = os.path.join(setting_folder_path, "data_aug_setting.json")
    aug_setting = pars.ParameterDict()
    aug_setting.load_JSON(aug_setting_path)
    fluid_mode = aug_setting["data_aug"]["fluid_aug"]["fluid_mode"]
    # registration results written by demo_for_easyreg_eval.py
    reg_res_folder_path = os.path.join(task_output_path,"reg/res/records")
    aug_input_txt = os.path.join(aug_output_path,"aug_input_path.txt")
    aug_name_txt = os.path.join(aug_output_path,"aug_input_name.txt")
    affine_path = None
    if fluid_mode == "aug_with_nonaffined_data":
        affine_path = reg_res_folder_path
    if fluid_mode == "aug_with_atlas":
        # point the atlas folders at the registration results and persist
        aug_setting["data_aug"]["fluid_aug"]["to_atlas_folder"] = reg_res_folder_path
        aug_setting["data_aug"]["fluid_aug"]["atlas_to_folder"] = reg_res_folder_path
        aug_setting.write_JSON(aug_setting_path)
    generate_moving_momentum_txt(reg_pair_list_txt,reg_res_folder_path,aug_input_txt,aug_name_txt,reg_name_list_txt,affine_path)
    return aug_input_txt,aug_name_txt,aug_output_path
def do_augmentation(input_txt, input_name_txt, setting_folder_path, aug_output_path,gpu_list):
    """Launch ``gen_aug_samples.py`` subprocesses, one per GPU, and wait for
    all of them to finish.

    For random sampling ("rand_sampl") the sample budget is split evenly
    across processes; otherwise the input txt files are split across
    processes instead.
    """
    aug_setting_path = os.path.join(setting_folder_path,"data_aug_setting.json")
    mermaid_setting_path = os.path.join(setting_folder_path,"mermaid_nonp_settings.json")
    assert os.path.isfile(aug_setting_path), "the aug setting json {} is not found".format(aug_setting_path)
    aug_setting = pars.ParameterDict()
    aug_setting.load_JSON(aug_setting_path)
    task_type = aug_setting["data_aug"]["fluid_aug"]["task_type"]
    num_process = len(gpu_list)
    if task_type == "rand_sampl":
        # random sampling: every process sees the full input but draws only
        # its share of the total sample budget
        max_aug_num = aug_setting["data_aug"]["max_aug_num"]
        max_aug_num_per_process = round(max_aug_num/num_process)
        aug_setting["data_aug"]["max_aug_num"]=max_aug_num_per_process
        aug_setting_mp_path = os.path.join(setting_folder_path,"data_aug_setting_mutli_process.json")
        aug_setting.write_ext_JSON(aug_setting_mp_path)
        processes = []
        for i in range(num_process):
            cmd = "python gen_aug_samples.py "
            cmd += "-t={} -n={} -as={} -ms={} -o={} -g={}\n".format(input_txt,input_name_txt,aug_setting_mp_path,mermaid_setting_path,aug_output_path,int(gpu_list[i]))
            p = subprocess.Popen(cmd, shell=True)
            processes.append(p)
            time.sleep(1)
    else:
        # deterministic interpolation: split the input lists across processes
        num_process = split_txt(input_txt, num_process, aug_output_path,"aug_p")
        split_txt(input_name_txt, num_process, aug_output_path,"aug_np")
        sub_input_txt_list = [os.path.join(aug_output_path, 'aug_p{}.txt'.format(i)) for i in range(num_process)]
        sub_input_name_txt_list = [os.path.join(aug_output_path, 'aug_np{}.txt'.format(i)) for i in range(num_process)]
        processes = []
        for i in range(num_process):
            cmd = "python gen_aug_samples.py "
            cmd += "-t={} -n={} -as={} -ms={} -o={} -g={}\n".format(sub_input_txt_list[i],sub_input_name_txt_list[i], aug_setting_path, mermaid_setting_path,
                                                                    aug_output_path,int(gpu_list[i]))
            p = subprocess.Popen(cmd, shell=True)
            processes.append(p)
            time.sleep(1)
    exit_codes = [p.wait() for p in processes]
def pipeline(args):
    """
    Set up the running environment, register all pairs, then run the
    fluid-based augmentation on the registration results.
    :param args: the parsed command-line arguments
    :return: None
    """
    setting_folder_path, reg_pair_list_txt, reg_name_list_txt=init_reg_env(args)
    do_registration(reg_pair_list_txt,reg_name_list_txt, setting_folder_path,args.task_output_path,args.gpu_id_list)
    aug_input_txt,aug_name_txt, aug_output_path = init_aug_env(reg_pair_list_txt,reg_name_list_txt,args.task_output_path,setting_folder_path)
    do_augmentation(aug_input_txt,aug_name_txt,setting_folder_path, aug_output_path, args.gpu_id_list)
if __name__ == '__main__':
"""
Though the purpose of this script is to provide demo, it is a generalized interface for fluid-based data augmentation and interpolation
    The augmentation/interpolation includes two parts
1. do fluid-based registration with either optimization methods or learning methods with pre-trained models.
2. do data augmentation via random sampling on geodesic space and time axis
or do data inter-/extra-polation with given direction and time point from geodesic space
it will call two script: demo_for_easyreg_eval.py and gen_aug_samples.py
so setting files for both tasks are need to be provided, see demo for details
As high precision registration is not necessary for data-augmentation, the default setting will be fine for most cases.
Of course, feel free to fine tune the multi_gaussian_stds and iterations if the task is to do data interpolation.
In the case that a lot of unlabeled data is available, we suggest to train a network via demo_for_easyreg_train.py first,
which would provide fast interpolation for data augmentation. Otherwise, the optimization option is recommended.
For file_txt, two input formats are supported:
1) aug_by_line: input txt where each line refer to a path of source image, paths of target images and the source label (string "None" if not exist), the labels of target images(None if not exist)
the augmentation takes place for each line
2) aug_by_file: input txt where each line refer to a image and corresponding label (string "None" if not exist)
the augmentation takes place among lines
For the name_txt (optional, will use the filename if not provided) include the fname for each image( to avoid confusion of source images with the same filename)
1) aug_by_line: each line include a source name, target names
2) aug_by_file: each line include a image name
All the settings should be given in the setting folder.
We support both learning-based and optimization based registration,
for the learning-based method, the pretrained model path should be provided in cur_task_setting.json
Arguments:
demo related:
--run_demo: run the demo
--demo_name: opt_lddmm_lpba/learnt_lddmm_oai
--gpu_id_list/ -g: gpu_id_list to use
other arguments:
--file_txt/-txt: the input txt recording the file path
--name_txt/-txt: the input txt recording the file name
--txt_format: aug_by_file/aug_by_line
--max_size_of_target_set_to_reg: max size of the target set for each source image, set -1 if there is no constraint
--max_size_of_pair_to_reg: max size of pair for registration, set -1 if there is no constraint, in that case the potential pair number would be N*(N-1) if txt_format is set as aug_by_file
--setting_folder_path/-ts :path of the folder where settings are saved
--task_output_path/ -o: the path of output folder
"""
import argparse
parser = argparse.ArgumentParser(description='An easy interface for evaluate various registration methods')
parser.add_argument("--run_demo", required=False, action='store_true', help='run demo')
parser.add_argument('--demo_name', required=False, type=str, default='opt_lddmm_lpba',
help='opt_lddmm_lpba/learnt_lddmm_oai')
# ---------------------------------------------------------------------------------------------------------------------------
parser.add_argument('-ts', '--setting_folder_path', required=False, type=str,
default="",
help='path of the folder where settings are saved,should include cur_task_setting.json,data_aug_setting, mermaid_affine_settings(optional) and mermaid_nonp_settings(optional)')
parser.add_argument('-t','--file_txt', required=False, default="", type=str,
help='the txt file recording the file to augment')
parser.add_argument('-n', '--name_txt', required=False, default=None, type=str,
help='the txt file recording the corresponding file name')
parser.add_argument('-f','--txt_format', required=False, default="aug_by_file", type=str,
help='txt format, aug_by_line/aug_by_file')
parser.add_argument('-mt','--max_size_of_target_set_to_reg', required=False, default=10, type=int,
help='max size of the target set for each source image, set -1 if there is no constraint')
parser.add_argument('-ma','--max_size_of_pair_to_reg', required=False, default=-1, type=int,
help='max size of pair for registration, set -1 if there is no constraint, in that case the potential pair number would be N*(N-1) if txt_format is set as aug_by_file')
parser.add_argument('-o', "--task_output_path", required=False, default="", type=str,help='the output path')
parser.add_argument('-g', "--gpu_id_list", nargs='+', required=False, default=None, help='list of gpu id to use')
args = parser.parse_args()
print(args)
run_demo = args.run_demo
demo_name = args.demo_name
file_txt = args.file_txt
txt_format = args.txt_format
if run_demo:
assert demo_name in ["opt_lddmm_lpba","learnt_lddmm_oai","learnt_lddmm_oai_interpolation"]
assert os.path.isfile(file_txt) or run_demo,"file not exist"
assert txt_format in ["aug_by_line","aug_by_file"]
pipeline(args)
| 12,841 | 52.066116 | 201 | py |
easyreg | easyreg-master/demo/demo_for_easyreg_train.py | import matplotlib as matplt
matplt.use('Agg')
import os, sys
sys.path.insert(0,os.path.abspath('..'))
sys.path.insert(0,os.path.abspath('.'))
sys.path.insert(0,os.path.abspath('../easy_reg'))
#sys.path.insert(0,os.path.abspath('../mermaid'))
import numpy as np
import torch
import random
torch.backends.cudnn.benchmark=True
import tools.module_parameters as pars
from abc import ABCMeta, abstractmethod
from easyreg.piplines import run_one_task
class BaseTask(metaclass=ABCMeta):
    """Abstract base for the JSON-settings wrapper tasks.

    BUG FIX: the original set the Python-2 attribute ``__metaclass__ = ABCMeta``,
    which Python 3 silently ignores, so ``save`` was never actually enforced as
    abstract. Declaring the metaclass properly restores the enforcement; all
    existing subclasses (DataTask/ModelTask) implement ``save`` and are unaffected.
    """
    def __init__(self, name):
        # Human-readable identifier of the task.
        self.name = name
    @abstractmethod
    def save(self):
        """Persist the task settings; must be implemented by subclasses."""
class DataTask(BaseTask):
    """Wrapper around a data-processing settings file (.json)."""
    def __init__(self, name, path='../settings/base_data_settings.json'):
        super().__init__(name)
        # Parse the JSON file into a hierarchical parameter dictionary.
        self.data_par = pars.ParameterDict()
        self.data_par.load_JSON(path)
    def save(self, path='../settings/data_settings.json'):
        """Write the (possibly modified) settings back out as JSON."""
        self.data_par.write_ext_JSON(path)
class ModelTask(BaseTask):
    """Wrapper around a task settings file (.json)."""
    def __init__(self, name, path='../settings/base_task_settings.json'):
        super().__init__(name)
        # Parse the JSON file into a hierarchical parameter dictionary.
        self.task_par = pars.ParameterDict()
        self.task_par.load_JSON(path)
    def save(self, path='../settings/task_settings.json'):
        """Write the (possibly modified) settings back out as JSON."""
        self.task_par.write_ext_JSON(path)
def init_train_env(setting_path,output_root_path, task_name, data_task_name=None):
    """
    create train environment.

    :param setting_path: the path to load 'cur_task_setting.json' and 'cur_data_setting.json' (optional if the related settings are in cur_task_setting)
    :param output_root_path: the output path
    :param task_name: task name i.e. run_training_vsvf_task, run_training_rdmm_task
    :param data_task_name: data task name i.e. lung_reg_task , oai_reg_task; falls back to 'custom' when empty or None
    :return: (dm, tsm); dm is None when no data-setting file exists
    """
    dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')
    assert os.path.isfile(tsm_json_path),"task setting not exists"
    # The data-setting file is optional; the task-setting file is mandatory.
    dm = DataTask('task_reg',dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg',tsm_json_path)
    # BUG FIX: the original called len(data_task_name), which raises TypeError
    # when the documented default of None is used; treat None and '' as 'custom'.
    data_task_name = data_task_name if data_task_name else 'custom'
    data_task_path = os.path.join(output_root_path,data_task_name)
    if dm is not None:
        dm.data_par['datapro']['dataset']['output_path'] = output_root_path
        dm.data_par['datapro']['dataset']['task_name'] = data_task_name
    tsm.task_par['tsk_set']['task_name'] = task_name
    tsm.task_par['tsk_set']['output_root_path'] = data_task_path
    # mermaid-based registration nets need an extra settings file; default to
    # the copy living next to the other settings when no path is given.
    if tsm.task_par['tsk_set']['model']=='reg_net'and 'mermaid' in tsm.task_par['tsk_set']['method_name']:
        mermaid_setting_json = tsm.task_par['tsk_set']['reg']['mermaid_net']['mermaid_net_json_pth']
        if len(mermaid_setting_json) == 0:
            tsm.task_par['tsk_set']['reg']['mermaid_net']['mermaid_net_json_pth'] = os.path.join(setting_path,'mermaid_nonp_settings.json')
    return dm, tsm
def addition_settings_for_two_stage_training(dm, tsm):
    """
    addition settings when perform two-stage training, we assume the affine is the first stage, a non-linear method is the second stage
    :param dm: ParameterDict, data processing setting (not used for now)
    :param tsm: ParameterDict, task setting
    :return: tuple of ParameterDict, datapro (optional) and tsk_set
    """
    # NOTE(review): this function reads the module-level ``args`` object whose
    # stage flags are set in do_registration_train — confirm before reusing
    # this helper outside that call path.
    if args.affine_stage_in_two_stage_training:
        # Stage 1: force the symmetric affine registration method.
        tsm.task_par['tsk_set']['method_name'] = 'affine_sym'
    if args.next_stage_in_two_stage_training:
        # Stage 2: initialize the non-parametric net from the best checkpoint
        # of stage 1, whose task name differs only by the '_stage1_affine' suffix.
        data_task_path = tsm.task_par['tsk_set']['output_root_path']
        task_name = tsm.task_par['tsk_set']['task_name'].replace('_stage2_nonp','_stage1_affine')
        task_output_path = os.path.join(data_task_path, task_name)
        tsm.task_par['tsk_set']['continue_train'] = False
        tsm.task_par['tsk_set']['reg']['mermaid_net']['using_affine_init'] = True
        tsm.task_par['tsk_set']['reg']['mermaid_net']['affine_init_path'] = os.path.join(task_output_path,
                                                                                        'checkpoints/model_best.pth.tar')
    return dm, tsm
def backup_settings(args):
    """
    The settings saved in setting_folder_path/task_name/cur_data_setting.json and setting_folder_path/task_name/cur_task_setting.json
    :param args:
    :return: None
    """
    setting_folder_path = args.setting_folder_path
    dm_json_path = os.path.join(setting_folder_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_folder_path, 'cur_task_setting.json')
    # Data settings are optional; task settings are required (load fails otherwise).
    dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg', tsm_json_path)
    # Backup folder is named after the ORIGINAL task name recorded before the
    # two-stage logic rewrites args.task_name.
    task_name = args.task_name_record
    setting_backup = os.path.join(setting_folder_path, task_name+'_backup')
    os.makedirs(setting_backup, exist_ok=True)
    dm_backup_json_path = os.path.join(setting_backup, 'cur_data_setting.json')
    tsm_backup_json_path =os.path.join(setting_backup,'cur_task_setting.json')
    # mermaid-based nets also carry a separate mermaid settings file; back it up
    # from the explicit path if given, else from the default location.
    if tsm.task_par['tsk_set']['model']=='reg_net' and 'mermaid' in tsm.task_par['tsk_set']['method_name']:
        mermaid_backup_json_path = os.path.join(setting_backup, 'mermaid_nonp_settings.json')
        mermaid_setting_json = tsm.task_par['tsk_set']['reg']['mermaid_net']['mermaid_net_json_pth']
        if len(mermaid_setting_json)==0:
            mermaid_setting_json = os.path.join(setting_folder_path,'mermaid_nonp_settings.json')
        mermaid_setting =pars.ParameterDict()
        mermaid_setting.load_JSON(mermaid_setting_json)
        mermaid_setting.write_ext_JSON(mermaid_backup_json_path)
    tsm.save(tsm_backup_json_path)
    if dm is not None:
        dm.save(dm_backup_json_path)
def __do_registration_train(args,pipeline=None):
    """
    set running env and run the task
    :param args: the parsed arguments
    :param pipeline:a Pipeline object, only used for two-stage training, the pipeline of the first stage (including dataloader) would be pass to the second stage
    :return: a Pipeline object
    """
    output_root_path = args.output_root_path
    task_name = args.task_name
    data_task_name = args.data_task_name
    setting_folder_path = args.setting_folder_path
    # Output layout: output_root_path/data_task_name/task_name
    data_task_path = os.path.join(output_root_path,data_task_name)
    task_output_path = os.path.join(data_task_path,task_name)
    os.makedirs(task_output_path, exist_ok=True)
    dm, tsm = init_train_env(setting_folder_path,output_root_path,task_name,data_task_name)
    dm, tsm = addition_settings_for_two_stage_training(dm, tsm)
    tsm.task_par['tsk_set']['gpu_ids'] = args.gpu_id
    # Persist the fully-resolved settings next to the outputs for reproducibility.
    dm_json_path = os.path.join(task_output_path, 'cur_data_setting.json') if dm is not None else None
    tsm_json_path = os.path.join(task_output_path, 'cur_task_setting.json')
    tsm.save(tsm_json_path)
    if dm is not None:
        dm.save(dm_json_path)
    # Reuse the stage-1 data loaders (if any) so stage 2 skips dataset setup.
    data_loaders = pipeline.data_loaders if pipeline is not None else None
    pipeline = run_one_task(tsm_json_path, dm_json_path,data_loaders)
    return pipeline
def set_seed_for_demo(args):
    """Fix all RNG seeds so a demo training run is reproducible."""
    if not args.is_demo:
        return
    demo_seed = 2018
    torch.manual_seed(demo_seed)
    torch.cuda.manual_seed(demo_seed)
    np.random.seed(demo_seed)
    random.seed(demo_seed)
    # Deterministic cuDNN kernels trade some speed for reproducibility.
    torch.backends.cudnn.deterministic = True
def do_registration_train(args):
    """
    a interface for setting one-stage training or two stage training (include affine)
    :param args: the parsed arguments
    :return: None
    """
    set_seed_for_demo(args)
    task_name = args.task_name
    # Remember the original task name; it is rewritten per stage below and
    # backup_settings / stage-2 lookups rely on the original.
    args.task_name_record = task_name
    backup_settings(args)
    pipeline = None
    # These flags are read globally by addition_settings_for_two_stage_training.
    args.affine_stage_in_two_stage_training = False
    args.next_stage_in_two_stage_training = False
    if args.train_affine_first:
        args.affine_stage_in_two_stage_training = True
        args.task_name = task_name +'_stage1_affine'
        pipeline = __do_registration_train(args)
        pipeline.clean_up()
        #torch.cuda.empty_cache()
        # Switch to stage 2: non-parametric training initialized from the
        # affine model; settings come from the backup made above.
        args.affine_stage_in_two_stage_training = False
        args.next_stage_in_two_stage_training =True
        args.setting_folder_path = os.path.join(args.setting_folder_path, task_name+'_backup')
        args.task_name = task_name+'_stage2_nonp'
    __do_registration_train(args,pipeline)
if __name__ == '__main__':
    """
    A training interface for learning methods.
    The method support list : mermaid-related methods (vSVF,LDDMM,RDMM), voxel-morph (cvpr and miccai)
    Assume there is three level folder, output_root_path/ data_task_name/ task_name
    In data_task_folder, you must include train/val/test/debug folders, for details please refer to doc/source/notes/preapre_data.rst
    Arguments:
        --output_root_path/ -o: the path of easyreg output root folder
        --data_task_name/ -dtn: data task name i.e. lung_reg_task , oai_reg_task,
        --task_name / -tn: task name i.e. run_training_vsvf_task, run_training_rdmm_task
        --setting_folder_path/ -ts: path of the folder where settings are saved,should include cur_task_setting.json, mermaid_affine_settings.json(optional) and mermaid_nonp_settings(optional)
        --train_affine_first: train affine network first, then train non-parametric network
        --gpu_id/ -g: gpu_id to use
        --is_demo: reproduce the tutorial result
    """
    import argparse
    # CLI definition; all options have defaults, the real configuration lives
    # in the settings folder passed via -ts.
    parser = argparse.ArgumentParser(description="An easy interface for training registration models")
    parser.add_argument('-o','--output_root_path', required=False, type=str,
                        default=None,help='the path of output folder')
    parser.add_argument('-dtn','--data_task_name', required=False, type=str,
                        default='',help='the name of the data related task (like subsampling)')
    parser.add_argument('-tn','--task_name', required=False, type=str,
                        default=None,help='the name of the task')
    parser.add_argument('-ts','--setting_folder_path', required=False, type=str,
                        default=None,help='path of the folder where settings are saved,should include cur_task_setting.json, mermaid_affine_settings.json(optional) and mermaid_nonp_settings(optional)')
    parser.add_argument('--train_affine_first',required=False,action='store_true',
                        help='train affine network first, then train non-parametric network')
    parser.add_argument('-g',"--gpu_id",required=False,type=int,default=0,help='gpu_id to use')
    parser.add_argument('--is_demo', required=False,action='store_true', help="reproduce the tutorial result")
    args = parser.parse_args()
    print(args)
    do_registration_train(args)
| 10,865 | 42.464 | 201 | py |
easyreg | easyreg-master/demo/demo_for_seg_train.py | import matplotlib as matplt
matplt.use('Agg')
import os, sys
sys.path.insert(0,os.path.abspath('..'))
sys.path.insert(0,os.path.abspath('.'))
sys.path.insert(0,os.path.abspath('../easyreg'))
import numpy as np
import torch
import random
import tools.module_parameters as pars
from abc import ABCMeta, abstractmethod
from easyreg.piplines import run_one_task
torch.backends.cudnn.benchmark=True
class BaseTask(metaclass=ABCMeta):
    """Abstract base for the JSON-settings wrapper tasks.

    BUG FIX: the original set the Python-2 attribute ``__metaclass__ = ABCMeta``,
    which Python 3 silently ignores, so ``save`` was never actually enforced as
    abstract. Declaring the metaclass properly restores the enforcement; all
    existing subclasses (DataTask/ModelTask) implement ``save`` and are unaffected.
    """
    def __init__(self, name):
        # Human-readable identifier of the task.
        self.name = name
    @abstractmethod
    def save(self):
        """Persist the task settings; must be implemented by subclasses."""
class DataTask(BaseTask):
    """Wrapper around a data-processing settings file (.json)."""
    def __init__(self, name, path='../settings/base_data_settings.json'):
        super().__init__(name)
        # Parse the JSON file into a hierarchical parameter dictionary.
        self.data_par = pars.ParameterDict()
        self.data_par.load_JSON(path)
    def save(self, path='../settings/data_settings.json'):
        """Write the (possibly modified) settings back out as JSON."""
        self.data_par.write_ext_JSON(path)
class ModelTask(BaseTask):
    """Wrapper around a task settings file (.json)."""
    def __init__(self, name, path='../settings/base_task_settings.json'):
        super().__init__(name)
        # Parse the JSON file into a hierarchical parameter dictionary.
        self.task_par = pars.ParameterDict()
        self.task_par.load_JSON(path)
    def save(self, path='../settings/task_settings.json'):
        """Write the (possibly modified) settings back out as JSON."""
        self.task_par.write_ext_JSON(path)
def init_train_env(setting_path,output_root_path, task_name, data_task_name=None):
    """
    create train environment.

    :param setting_path: the path to load 'cur_task_setting.json' and 'cur_data_setting.json' (optional if the related settings are in cur_task_setting)
    :param output_root_path: the output path
    :param task_name: task name i.e. run_unet, run_with_ncc_loss
    :param data_task_name: data task name i.e. lung_seg_task , oai_seg_task; falls back to 'custom' when empty or None
    :return: (dm, tsm); dm is None when no data-setting file exists
    """
    dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')
    assert os.path.isfile(tsm_json_path),"task setting {} not exists".format(tsm_json_path)
    # The data-setting file is optional; the task-setting file is mandatory.
    dm = DataTask('task_reg',dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg',tsm_json_path)
    # BUG FIX: the original called len(data_task_name), which raises TypeError
    # when the documented default of None is used; treat None and '' as 'custom'.
    data_task_name = data_task_name if data_task_name else 'custom'
    data_task_path = os.path.join(output_root_path,data_task_name)
    if dm is not None:
        dm.data_par['datapro']['dataset']['output_path'] = output_root_path
        dm.data_par['datapro']['dataset']['task_name'] = data_task_name
    tsm.task_par['tsk_set']['task_name'] = task_name
    tsm.task_par['tsk_set']['output_root_path'] = data_task_path
    return dm, tsm
def backup_settings(args):
    """
    The settings saved in setting_folder_path/task_name/cur_data_setting.json and setting_folder_path/task_name/cur_task_setting.json
    :param args:
    :return: None
    """
    setting_folder_path = args.setting_folder_path
    dm_json_path = os.path.join(setting_folder_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_folder_path, 'cur_task_setting.json')
    # Data settings are optional; task settings are required (load fails otherwise).
    dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg', tsm_json_path)
    # Backup folder is named after the task name recorded by the caller.
    task_name = args.task_name_record
    setting_backup = os.path.join(setting_folder_path, task_name+'_backup')
    os.makedirs(setting_backup, exist_ok=True)
    dm_backup_json_path = os.path.join(setting_backup, 'cur_data_setting.json')
    tsm_backup_json_path =os.path.join(setting_backup,'cur_task_setting.json')
    tsm.save(tsm_backup_json_path)
    if dm is not None:
        dm.save(dm_backup_json_path)
def __do_segmentation_train(args,pipeline=None):
    """
    set running env and run the task
    :param args: the parsed arguments
    :param pipeline:a Pipeline object
    :return: a Pipeline object
    """
    output_root_path = args.output_root_path
    task_name = args.task_name
    data_task_name = args.data_task_name
    setting_folder_path = args.setting_folder_path
    # Output layout: output_root_path/data_task_name/task_name
    data_task_path = os.path.join(output_root_path,data_task_name)
    task_output_path = os.path.join(data_task_path,task_name)
    os.makedirs(task_output_path, exist_ok=True)
    dm, tsm = init_train_env(setting_folder_path,output_root_path,task_name,data_task_name)
    tsm.task_par['tsk_set']['gpu_ids'] = args.gpu_id
    # Persist the fully-resolved settings next to the outputs for reproducibility.
    dm_json_path = os.path.join(task_output_path, 'cur_data_setting.json') if dm is not None else None
    tsm_json_path = os.path.join(task_output_path, 'cur_task_setting.json')
    tsm.save(tsm_json_path)
    if dm is not None:
        dm.save(dm_json_path)
    # Reuse existing data loaders when a previous pipeline is supplied.
    data_loaders = pipeline.data_loaders if pipeline is not None else None
    pipeline = run_one_task(tsm_json_path, dm_json_path,data_loaders)
    return pipeline
def do_segmentation_train(args):
    """Entry point for a single-stage segmentation training run.

    :param args: the parsed arguments
    :return: None
    """
    # Record the task name before helpers read/mutate ``args``.
    args.task_name_record = args.task_name
    backup_settings(args)
    # There is no previous stage, so no pipeline/data loaders to reuse.
    __do_segmentation_train(args, pipeline=None)
if __name__ == '__main__':
    """
    An interface for learning segmentation methods.
    Assume there is three level folder, output_root_path/ data_task_name/ task_name
    In data_task_folder, you must include train/val/test/debug folders, for details please refer to doc/source/notes/preapre_data.rst
    Arguments:
        --output_root_path/ -o: the path of output folder
        --data_task_name/ -dtn: data task name i.e. lung_reg_task , oai_reg_task
        --task_name / -tn: task name i.e. run_training_vsvf_task, run_training_rdmm_task
        --setting_folder_path/ -ts: path of the folder where settings are saved,should include cur_task_setting.json
        --gpu_id/ -g: gpu_id to use
    """
    import argparse
    # CLI definition; all options have defaults, the real configuration lives
    # in the settings folder passed via -ts.
    parser = argparse.ArgumentParser(description="An easy interface for training segmentation models")
    parser.add_argument('-o','--output_root_path', required=False, type=str,
                        default=None,help='the path of output folder')
    parser.add_argument('-dtn','--data_task_name', required=False, type=str,
                        default='',help='the name of the data related task (like subsampling)')
    parser.add_argument('-tn','--task_name', required=False, type=str,
                        default=None,help='the name of the task')
    parser.add_argument('-ts','--setting_folder_path', required=False, type=str,
                        default=None,help='path of the folder where settings are saved,should include cur_task_setting.json)')
    parser.add_argument('-g',"--gpu_id",required=False,type=int,default=0,help='gpu_id to use')
    args = parser.parse_args()
    print(args)
    do_segmentation_train(args)
| 6,756 | 35.722826 | 152 | py |
easyreg | easyreg-master/demo/hack_v2.py | import os
import numpy as np
import torch
import SimpleITK as sitk
import pyvista as pv
from easyreg.net_utils import gen_identity_map
from tools.image_rescale import permute_trans
from tools.module_parameters import ParameterDict
from easyreg.lin_unpublic_net import model
from easyreg.utils import resample_image, get_transform_with_itk_format, dfield2bspline
from tools.visual_tools import save_3D_img_from_numpy
import mermaid.utils as py_utils
dirlab_landmarks_folder = "/playpen-raid1/zyshen/lung_reg/evaluate/dirlab_landmarks"
def resize_img(img, img_after_resize=None, is_mask=False):
    """
    Resample a 3D SimpleITK image to a new voxel grid size.

    :param img: sitk input, factor is the outputs_ize/patched_sized
    :param img_after_resize: list, img_after_resize, image size after resize in itk coordinate
    :param is_mask: use nearest-neighbor interpolation (preserve labels) instead of linear
    :return: (resampled sitk image, per-axis resize factor)
    """
    img_sz = img.GetSize()
    if img_after_resize is not None:
        img_after_resize = img_after_resize
    else:
        img_after_resize = img_sz
    # NOTE(review): np.flipud(img_sz) suggests the target size is given in the
    # reversed (numpy zyx) order relative to ITK's xyz GetSize() — confirm with callers.
    resize_factor = np.array(img_after_resize) / np.flipud(img_sz)
    # Scale for the affine diagonal uses (n-1) so grid endpoints map onto endpoints.
    spacing_factor = (np.array(img_after_resize) - 1) / (np.flipud(img_sz) - 1)
    resize = not all([factor == 1 for factor in resize_factor])
    if resize:
        resampler = sitk.ResampleImageFilter()
        dimension = 3
        # Flip back to ITK axis order for computing the output size.
        factor = np.flipud(resize_factor)
        affine = sitk.AffineTransform(dimension)
        matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
        after_size = [round(img_sz[i] * factor[i]) for i in range(dimension)]
        after_size = [int(sz) for sz in after_size]
        # Diagonal scaling transform; ITK transforms map output space -> input space,
        # hence the reciprocal.
        matrix[0, 0] = 1. / spacing_factor[0]
        matrix[1, 1] = 1. / spacing_factor[1]
        matrix[2, 2] = 1. / spacing_factor[2]
        affine.SetMatrix(matrix.ravel())
        resampler.SetSize(after_size)
        resampler.SetTransform(affine)
        if is_mask:
            resampler.SetInterpolator(sitk.sitkNearestNeighbor)
        else:
            resampler.SetInterpolator(sitk.sitkLinear)
        img_resampled = resampler.Execute(img)
    else:
        # Already at the requested size; return the input untouched.
        img_resampled = img
    return img_resampled, resize_factor
def resample_image_itk_by_spacing_and_size(
    image,
    output_spacing,
    output_size,
    output_type=None,
    interpolator=sitk.sitkBSpline,
    padding_value=-1024,
    center_padding=True,
):
    """
    Image resampling using ITK
    :param image: simpleITK image
    :param output_spacing: numpy array or tuple. Output spacing
    :param output_size: numpy array or tuple. Output size
    :param output_type: simpleITK output data type. If None, use the same as 'image'
    :param interpolator: simpleITK interpolator (default: BSpline)
    :param padding_value: pixel padding value when a transformed pixel is outside of the image
    :param center_padding: shift the origin so the padded region is split evenly on both sides
    :return: tuple with simpleITK image and array with the resulting output spacing
    """
    resampler = sitk.ResampleImageFilter()
    resampler.SetOutputDirection(image.GetDirection())
    resampler.SetSize(output_size)
    resampler.SetDefaultPixelValue(padding_value)
    resampler.SetInterpolator(interpolator)
    resampler.SetOutputSpacing(np.array(output_spacing))
    resampler.SetOutputPixelType(
        output_type if output_type is not None else image.GetPixelIDValue()
    )
    # Ratio of input to output spacing per axis (how many output voxels per input voxel).
    factor = np.asarray(image.GetSpacing()) / np.asarray(output_spacing).astype(
        np.float32
    )
    # Get new output origin
    if center_padding:
        # Physical extent the input actually occupies at the new spacing; the
        # remaining (requested - actual) extent is split evenly on both sides.
        real_output_size = np.round(
            np.asarray(image.GetSize()) * factor + 0.0005
        ).astype(np.uint32)
        diff = ((output_size - real_output_size) * np.asarray(output_spacing)) / 2
        output_origin = np.asarray(image.GetOrigin()) - diff
        # output_origin = output_origin - np.asarray(image.GetSpacing()) / 2 \
        #                 + output_spacing / 2
    else:
        output_origin = np.asarray(image.GetOrigin())
    resampler.SetOutputOrigin(output_origin)
    return resampler.Execute(image)
def normalize_img(img, is_mask=False):
    """Normalize a numpy image in place and return it.

    Intensity images have their low end clamped to -1000; masks have labels
    above 400 discarded and every remaining non-zero voxel set to 1.

    :param img: numpy array, modified in place
    :param is_mask: treat ``img`` as a label map instead of intensities
    :return: the same array object
    """
    if is_mask:
        # Drop implausibly large label values, then binarize the rest.
        img[img > 400] = 0
        img[img != 0] = 1
    else:
        # Clamp the low end of the intensity range.
        img[img < -1000] = -1000
    return img
def preprocess(img_sitk,is_mask=False):
    # NOTE(review): this function appears to be broken copy-paste: the parameters
    # ``img_sitk``/``is_mask`` are never used, and it references names that are
    # not defined in this module (path_pair, spacing, sz, seg_bg, load_ITK,
    # resample, seg_bg_mask, seg_lung_mask, normalize_intensity). It cannot run
    # as-is; confirm against the original repository before use.
    ori_source, ori_spacing, _ = load_ITK(path_pair[0])
    ori_source = np.flip(sitk.GetArrayFromImage(ori_source), axis=(0))
    ori_target, ori_spacing, _ = load_ITK(path_pair[1])
    ori_target = np.flip(sitk.GetArrayFromImage(ori_target), axis=(0))
    # Pad the one with smaller size
    pad_size = ori_target.shape[0] - ori_source.shape[0]
    if pad_size > 0:
        ori_source = np.pad(ori_source, ((0, pad_size), (0, 0), (0, 0)), mode='constant', constant_values=-1024)
    else:
        ori_target = np.pad(ori_target, ((0, -pad_size), (0, 0), (0, 0)), mode='constant', constant_values=-1024)
    assert ori_source.shape == ori_target.shape, "The shape of source and target image should be the same!"
    # Resample both volumes to the (module-level) target spacing, clamping padding values.
    source, _, _ = resample(ori_source, ori_spacing, spacing)
    source[source < -1024] = -1024
    target, new_spacing, _ = resample(ori_target, ori_spacing, spacing)
    target[target < -1024] = -1024
    if seg_bg:
        # Replace everything outside the body mask with the minimum intensity.
        bg_hu = np.min(source)
        source_bg_seg, source_bbox = seg_bg_mask(source)
        source[source_bg_seg == 0] = bg_hu
        bg_hu = np.min(target)
        target_bg_seg, source_bbox = seg_bg_mask(target)
        target[target_bg_seg == 0] = bg_hu
        total_voxel = np.prod(target.shape)
        print("##########Area percentage of ROI:{:.2f}, {:.2f}".format(float(np.sum(source_bg_seg)) / total_voxel,
                                                                      float(np.sum(target_bg_seg)) / total_voxel))
    source_seg, _ = seg_lung_mask(source)
    target_seg, _ = seg_lung_mask(target)
    # Pad 0 if shape is smaller than desired size.
    new_origin = np.array((0, 0, 0))
    sz_diff = sz - source.shape
    sz_diff[sz_diff < 0] = 0
    pad_width = [[int(sz_diff[0] / 2), sz_diff[0] - int(sz_diff[0] / 2)],
                 [int(sz_diff[1] / 2), sz_diff[1] - int(sz_diff[1] / 2)],
                 [int(sz_diff[2] / 2), sz_diff[2] - int(sz_diff[2] / 2)]]
    source = np.pad(source, pad_width, constant_values=-1024)
    target = np.pad(target, pad_width, constant_values=-1024)
    source_seg = np.pad(source_seg, pad_width, constant_values=0)
    target_seg = np.pad(target_seg, pad_width, constant_values=0)
    # Track the origin shift introduced by padding (negative offsets).
    new_origin[sz_diff > 0] = -np.array(pad_width)[sz_diff > 0, 0]
    # Crop if shape is greater than desired size.
    sz_diff = source.shape - sz
    bbox = [[int(sz_diff[0] / 2), int(sz_diff[0] / 2) + sz[0]],
            [int(sz_diff[1] / 2), int(sz_diff[1] / 2) + sz[1]],
            [int(sz_diff[2] / 2), int(sz_diff[2] / 2) + sz[2]]]
    source = source[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], bbox[2][0]:bbox[2][1]]
    target = target[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], bbox[2][0]:bbox[2][1]]
    source_seg = source_seg[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], bbox[2][0]:bbox[2][1]]
    target_seg = target_seg[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], bbox[2][0]:bbox[2][1]]
    new_origin[sz_diff > 0] = np.array(bbox)[sz_diff > 0, 0]
    source = normalize_intensity(source)
    target = normalize_intensity(target)
    return source, target, source_seg, target_seg, new_origin, new_spacing
    # NOTE(review): dead code — unreachable, and ``sitk_img`` is undefined.
    return sitk_img
def convert_itk_to_support_deepnet(img_sitk, is_mask=False,device=torch.device("cuda:0")):
    """Resize an ITK image to the fixed 160^3 network input and return a 1x1xDxHxW tensor.

    NOTE(review): the default ``device`` is evaluated at import time and
    requires CUDA; pass an explicit device on CPU-only machines.
    """
    img_sz_after_resize = [160]*3
    # Array round-trip drops origin/spacing/direction metadata on purpose —
    # the network operates on the raw voxel grid only.
    img_sitk = sitk.GetImageFromArray(sitk.GetArrayFromImage(img_sitk))
    img_after_resize,_ = resize_img(img_sitk,img_sz_after_resize, is_mask=is_mask)
    img_numpy = sitk.GetArrayFromImage(img_after_resize)
    # Add batch and channel dimensions expected by the model.
    return torch.Tensor(img_numpy.astype(np.float32))[None][None].to(device)
def identity_map(sz,spacing,dtype='float32'):
    """
    Returns an identity map.

    :param sz: just the spatial dimensions, i.e., XxYxZ
    :param spacing: list with spacing information [sx,sy,sz]
    :param dtype: numpy data-type ('float32', 'float64', ...)
    :return: returns the identity map of dimension dimxXxYxZ
    """
    dim = len(sz)
    if dim not in (1, 2, 3):
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    # Build the integer index grid for all axes at once; result shape is dim x sz.
    grid = np.mgrid[tuple(slice(0, s) for s in sz)].astype(dtype)
    grid = grid.reshape([dim] + list(sz))
    # Scale each coordinate axis by its spacing, mapping the grid into
    # [0, (sz-1)*spacing]^dim. Spacing is cast to the target dtype first so
    # the multiplication does not upcast the result.
    scale = np.asarray(spacing, dtype=dtype).reshape([dim] + [1] * dim)
    return grid * scale
def convert_transform_into_itk_bspline(transform,spacing,moving_ref,target_ref):
    """Convert a dense displacement (network coords) into an ITK BSpline transform.

    :param transform: 1x3xXxYxZ displacement, torch tensor or numpy array
    :param spacing: spacing of the transform grid used by the network
    :param moving_ref: sitk image providing moving-space spacing/origin/direction
    :param target_ref: sitk image providing target-space spacing/origin/direction
    :return: BSpline transform fitted to the physical displacement field
    """
    if type(transform) == torch.Tensor:
        transform = transform.detach().cpu().numpy()
    cur_trans = transform[0]
    img_sz = np.array(transform.shape[2:])
    moving_spacing_ref = moving_ref.GetSpacing()
    moving_direc_ref = moving_ref.GetDirection()
    moving_orig_ref = moving_ref.GetOrigin()
    target_spacing_ref = target_ref.GetSpacing()
    target_direc_ref = target_ref.GetDirection()
    target_orig_ref = target_ref.GetOrigin()
    # Identity grids in physical units; flipud converts ITK xyz spacing to numpy zyx order.
    id_np_moving = identity_map(img_sz, np.flipud(moving_spacing_ref))
    id_np_target = identity_map(img_sz, np.flipud(target_spacing_ref))
    # Rescale the displacement from the network grid spacing to physical spacing.
    factor = np.flipud(moving_spacing_ref) / spacing
    factor = factor.reshape(3,1,1,1)
    moving_direc_matrix = np.array(moving_direc_ref).reshape(3, 3)
    target_direc_matrix = np.array(target_direc_ref).reshape(3, 3)
    # Physical displacement = direction-rotated warped moving positions minus
    # direction-rotated target identity positions.
    cur_trans = np.matmul(moving_direc_matrix, permute_trans(id_np_moving + cur_trans * factor).reshape(3, -1)) \
                - np.matmul(target_direc_matrix, permute_trans(id_np_target).reshape(3, -1))
    cur_trans = cur_trans.reshape(id_np_moving.shape)
    # Account for the origin offset between the two spaces.
    bias = np.array(target_orig_ref)-np.array(moving_orig_ref)
    bias = -bias.reshape(3,1,1,1)
    transform_physic = cur_trans +bias
    trans = get_transform_with_itk_format(transform_physic,target_spacing_ref, target_orig_ref,target_direc_ref)
    #sitk.WriteTransform(trans, saving_path)
    # Retrive the DField from the Transform
    dfield = trans.GetDisplacementField()
    # Fitting a BSpline from the Deformation Field
    bstx = dfield2bspline(dfield, verbose=True)
    return bstx
def convert_output_into_itk_support_format(source_itk,target_itk, l_source_itk, l_target_itk, phi,spacing):
    """Resample the network transform to full resolution and warp image/labels.

    :param source_itk: sitk source image (or None to skip image warping)
    :param target_itk: sitk target image defining the full-resolution grid
    :param l_source_itk: sitk source label map (or None to skip label warping)
    :param l_target_itk: sitk target label map defining the label grid
    :param phi: transform map in [-1,1] network coordinates, 1x3xXxYxZ tensor
    :param spacing: spacing of the network transform grid
    :return: (full-res phi, warped image, warped labels, new spacing, bspline placeholder)
    """
    phi = (phi+1)/2 # here we assume the phi take the [-1,1] coordinate, usually used by deep network
    new_phi = None
    warped = None
    l_warped = None
    new_spacing = None
    if source_itk is not None:
        s = sitk.GetArrayFromImage(source_itk)
        t = sitk.GetArrayFromImage(target_itk)
        sz_t = [1, 1] + list(t.shape)
        source = torch.from_numpy(s[None][None]).to(phi.device)
        # Upsample the transform to the target's full voxel grid, then warp
        # the image with linear (order-1) interpolation.
        new_phi, new_spacing = resample_image(phi, spacing, sz_t, 1, zero_boundary=True)
        warped = py_utils.compute_warped_image_multiNC(source, new_phi, new_spacing, 1, zero_boundary=True)
    if l_source_itk is not None:
        ls = sitk.GetArrayFromImage(l_source_itk)
        lt = sitk.GetArrayFromImage(l_target_itk)
        sz_lt = [1, 1] + list(lt.shape)
        l_source = torch.from_numpy(ls[None][None]).to(phi.device)
        if new_phi is None:
            new_phi, new_spacing = resample_image(phi, spacing, sz_lt, 1, zero_boundary=True)
        # Labels use order-0 (nearest) interpolation to stay discrete.
        l_warped = py_utils.compute_warped_image_multiNC(l_source, new_phi, new_spacing, 0, zero_boundary=True)
    # NOTE(review): if source_itk is None this dereferences warped=None and
    # crashes; the function currently assumes an image is always provided.
    id_map = gen_identity_map(warped.shape[2:], resize_factor=1., normalized=True).cuda()
    id_map = (id_map[None] + 1) / 2.
    disp = new_phi - id_map
    # BSpline conversion is disabled; kept for reference.
    bspline_itk = None #convert_transform_into_itk_bspline(disp,new_spacing,source_itk, target_itk)
    return new_phi, warped,l_warped, new_spacing, bspline_itk
def predict(source_itk, target_itk,source_mask_itk=None, target_mask_itk=None, model_path="",device=torch.device("cuda:0")):
    """Run the pretrained registration network in both directions.

    :param source_itk: sitk source image
    :param target_itk: sitk target image
    :param source_mask_itk: optional sitk source mask
    :param target_mask_itk: optional sitk target mask
    :param model_path: path to the pretrained model checkpoint
    :param device: torch device; default requires CUDA (evaluated at import time)
    :return: dict with full-resolution forward/inverse maps and bspline placeholders
    """
    # Settings path is hard-coded relative to the working directory.
    setting_path = "./lung_reg/task_setting.json"
    opt = ParameterDict()
    opt.load_JSON(setting_path)
    source = convert_itk_to_support_deepnet(source_itk,device=device)
    target = convert_itk_to_support_deepnet(target_itk, device=device)
    source_mask = convert_itk_to_support_deepnet(source_mask_itk,is_mask=True) if source_mask_itk is not None else None
    target_mask = convert_itk_to_support_deepnet(target_mask_itk,is_mask=True) if target_mask_itk is not None else None
    network = model(img_sz=[160,160,160],opt=opt)
    network.load_pretrained_model(model_path)
    network.to(device)
    network.train(False)
    with torch.no_grad():
        # Forward (source->target) and inverse (target->source) registrations.
        warped, composed_map, affine_img = network.forward(source, target, source_mask, target_mask)
        inv_warped, composed_inv_map, inv_affine_img = network.forward(target, source, target_mask, source_mask)
        # Normalized spacing of the network output grid.
        spacing = 1./(np.array(warped.shape[2:])-1)
    del network
    full_composed_map, full_warped,l_full_warped, _, bspline_itk = convert_output_into_itk_support_format(source_itk,target_itk, source_mask_itk, target_mask_itk, composed_map,spacing)
    full_composed_map = full_composed_map.detach().cpu().squeeze().numpy()
    full_inv_composed_map, full_inv_warped,l_full_inv_warped, _, inv_bspline_itk = convert_output_into_itk_support_format(target_itk,source_itk, target_mask_itk, source_mask_itk, composed_inv_map,spacing)
    full_inv_composed_map = full_inv_composed_map.detach().cpu().squeeze().numpy()
    # save_3D_img_from_numpy(full_inv_warped.squeeze().cpu().numpy(),"/playpen-raid1/zyshen/debug/debug_lin_model2.nii.gz",
    #                        source_itk.GetSpacing(), source_itk.GetOrigin(), source_itk.GetDirection())
    # save_3D_img_from_numpy(sitk.GetArrayFromImage(source_itk),
    #                        "/playpen-raid1/zyshen/debug/debug_lin_source.nii.gz",
    #                        source_itk.GetSpacing(), source_itk.GetOrigin(), source_itk.GetDirection())
    return {"phi": full_composed_map, "inv_phi": full_inv_composed_map,"bspline":bspline_itk, "inv_bspline":inv_bspline_itk}
def evaluate_on_dirlab(inv_map,moving_itk, target_itk,dirlab_id):
    """Evaluate a registration on one DIRLAB COPD case via landmark error.

    Warps the expiration (moving) landmarks with the inverse transformation map
    and prints the mean Euclidean landmark distance before and after
    registration.

    :param inv_map: inverse transformation map, 3 x X x Y x Z, in normalized [0,1] coordinates
    :param moving_itk: sitk image, the moving (expiration) image
    :param target_itk: sitk image, the target (inspiration) image
    :param dirlab_id: str, DIRLAB scan id (e.g. "12042G"); must be a key of MAPPING
    """
    # DIRLAB scan id -> internal copd case id used for the landmark files
    MAPPING = {
        "12042G": "copd_000006",
        "12105E": "copd_000007",
        "12109M": "copd_000008",
        "12239Z": "copd_000009",
        "12829U": "copd_000010",
        "13216S": "copd_000001",
        "13528L": "copd_000002",
        "13671Q": "copd_000003",
        "13998W": "copd_000004",
        "17441T": "copd_000005"
    }
    COPD_ID = [
        "copd_000001",
        "copd_000002",
        "copd_000003",
        "copd_000004",
        "copd_000005",
        "copd_000006",
        "copd_000007",
        "copd_000008",
        "copd_000009",
        "copd_000010"
    ]
    def get_dirlab_landmark(case_id):
        """Load the expiration and inspiration landmark point sets for a case.

        NOTE(review): dirlab_landmarks_folder is a module-level path defined
        elsewhere in this file -- confirm it is set before this is called.
        """
        assert case_id in COPD_ID
        exp_landmark_path = os.path.join(dirlab_landmarks_folder, case_id + "_EXP.vtk")
        insp_landmark_path = os.path.join(dirlab_landmarks_folder, case_id + "_INSP.vtk")
        exp_landmark = read_vtk(exp_landmark_path)["points"]
        insp_landmark = read_vtk(insp_landmark_path)["points"]
        return exp_landmark, insp_landmark
    def read_vtk(path):
        """Read a vtk file via pyvista into a dict of points, faces and point arrays."""
        data = pv.read(path)
        data_dict = {}
        data_dict["points"] = data.points.astype(np.float32)
        # pyvista stores faces as [n, i0, ..., in-1]; drop the leading vertex count
        data_dict["faces"] = data.faces.reshape(-1, 4)[:, 1:].astype(np.int32)
        for name in data.array_names:
            try:
                data_dict[name] = data[name]
            except:
                # best-effort: skip arrays pyvista cannot expose by name
                pass
        return data_dict
    def warp_points(points, inv_map, moving_itk, target_itk):
        """Warp physical-space landmarks through the inverse transformation map.

        The map lives in normalized [0,1] coordinates with (width, height, depth)
        channel order, while the landmarks come in physical itk space
        (x, y, z). The points are therefore converted to normalized voxel
        coordinates, sampled through the map with grid_sample, flipped back and
        rescaled into the target image's physical space.

        :param points: Nx3 numpy array of landmark positions in physical space
        :param inv_map: 3 x X x Y x Z inverse map in normalized coordinates
        :param moving_itk: sitk image providing the moving spacing/origin
        :param target_itk: sitk image providing the target spacing/origin
        :return: Nx3 numpy array of warped landmarks in target physical space
        """
        import numpy as np
        import torch.nn.functional as F
        # first make everything in voxel coordinate, depth, height, width
        img_sz = np.array(inv_map.shape[1:])
        standard_spacing = 1 / (img_sz - 1)  # width,height, depth
        standard_spacing = np.flipud(standard_spacing)  # depth, height, width
        moving_img = moving_itk
        moving_spacing = moving_img.GetSpacing()
        moving_spacing = np.array(moving_spacing)
        moving_origin = moving_img.GetOrigin()
        moving_origin = np.array(moving_origin)
        target_img = target_itk
        target_spacing = target_img.GetSpacing()
        target_spacing = np.array(target_spacing)
        target_origin = target_img.GetOrigin()
        target_origin = np.array(target_origin)
        moving = sitk.GetArrayFromImage(moving_img)
        # debug visualization: burn the source landmarks into a copy of the moving image
        slandmark_index = (points-moving_origin) / moving_spacing
        for coord in slandmark_index:
            coord_int = [int(c) for c in coord]
            moving[coord_int[2],coord_int[1],coord_int[0]] = 2.
        # NOTE(review): hardcoded debug output path -- remove or parameterize for production use
        save_3D_img_from_numpy(moving,"/playpen-raid2/zyshen/debug/{}_padded.nii.gz".format(dirlab_id+"_moving"),
                               spacing=moving_img.GetSpacing(), orgin=moving_img.GetOrigin(), direction=moving_img.GetDirection())
        # map physical points into normalized [0,1] voxel coordinates, then to [-1,1] for grid_sample
        points = (points - moving_origin) / moving_spacing * standard_spacing
        points = points * 2 - 1
        grid_sz = [1] + [points.shape[0]] + [1, 1, 3]  # 1*N*1*1*3
        grid = points.reshape(*grid_sz)
        grid = torch.Tensor(grid).cuda()
        inv_map = torch.Tensor(inv_map).cuda()
        inv_map_sz = [1, 3] + list(img_sz)  # width,height, depth
        inv_map = inv_map.view(*inv_map_sz)  # 1*3*X*Y*Z
        points_wraped = F.grid_sample(inv_map, grid, mode='bilinear', padding_mode='border',
                                      align_corners=True)  # 1*3*N*1*1
        points_wraped = points_wraped.detach().cpu().numpy()
        points_wraped = np.transpose(np.squeeze(points_wraped))
        # flip channel order back and rescale into the target's physical space
        points_wraped = np.flip(points_wraped, 1) / standard_spacing * target_spacing + target_origin
        # debug visualization: burn the warped landmarks into a copy of the target image
        warp = sitk.GetArrayFromImage(target_img)
        wlandmark_index = (points_wraped - target_origin) / target_spacing
        for coord in wlandmark_index:
            coord_int = [int(c) for c in coord]
            warp[coord_int[2], coord_int[1], coord_int[0]] = 2.
        # NOTE(review): hardcoded debug output path -- remove or parameterize for production use
        save_3D_img_from_numpy(warp, "/playpen-raid2/zyshen/debug/{}_debug.nii.gz".format("warp"))
        return points_wraped
    assert dirlab_id in MAPPING, "{} doesn't belong to ten dirlab cases:{}".format(dirlab_id, MAPPING.keys())
    exp_landmark, insp_landmark = get_dirlab_landmark(MAPPING[dirlab_id])
    warped_landmark = warp_points(exp_landmark, inv_map, moving_itk, target_itk)
    diff = warped_landmark - insp_landmark
    diff_norm = np.linalg.norm(diff, ord=2, axis=1)
    print("before register landmark error norm: {}".format(
        np.linalg.norm(exp_landmark - insp_landmark, ord=2, axis=1).mean()))
    print("after register landmark error norm: {}".format(diff_norm.mean()))
def get_file_name(img_path):
    """Return the base name of *img_path* with every dotted suffix removed."""
    base_name = os.path.split(img_path)[-1]
    return base_name.split(".")[0]
if __name__ == "__main__":
    # Demo: register one DIRLAB COPD expiration scan onto the matching
    # inspiration scan and report the landmark error.
    # NOTE(review): all paths are cluster-specific and hardcoded.
    source_path = "/playpen-raid1/Data/DIRLABCasesHighRes/12042G_EXP_STD_USD_COPD.nrrd"
    target_path = "/playpen-raid1/Data/DIRLABCasesHighRes/12042G_INSP_STD_USD_COPD.nrrd"
    source_mask_path = "/playpen-raid1/lin.tian/data/raw/DIRLABCasesHighRes/copd6/copd6_EXP_label.nrrd"
    target_mask_path = "/playpen-raid1/lin.tian/data/raw/DIRLABCasesHighRes/copd6/copd6_INSP_label.nrrd"
    model_path = "./lung_reg/lin_model"
    source_itk = sitk.ReadImage(source_path)
    target_itk = sitk.ReadImage(target_path)
    # masks are optional; an empty path string disables them
    source_mask_itk = sitk.ReadImage(source_mask_path) if len(source_mask_path) else None
    target_mask_itk = sitk.ReadImage(target_mask_path) if len(target_mask_path) else None
    # NOTE(review): preprocess is defined elsewhere in this module -- confirm it
    # performs the intensity normalization the network expects.
    preprocessed_source_itk = preprocess(source_itk)
    preprocessed_target_itk = preprocess(target_itk)
    #sitk.WriteImage(preprocessed_source_itk,"/playpen-raid1/zyshen/debug/12042G_preprocessed.nii.gz")
    if source_mask_itk is not None and target_mask_itk is not None:
        preprocessed_source_mask_itk = preprocess(source_mask_itk,is_mask=True)
        preprocessed_target_mask_itk = preprocess(target_mask_itk,is_mask=True)
    else:
        preprocessed_source_mask_itk = None
        preprocessed_target_mask_itk = None
    output_dict = predict(preprocessed_source_itk, preprocessed_target_itk,preprocessed_source_mask_itk,preprocessed_target_mask_itk,
                          model_path=model_path)
    # DIRLAB scan id is the leading token of the source file name, e.g. "12042G"
    dirlab_id = get_file_name(source_path).split("_")[0]
    evaluate_on_dirlab(output_dict["inv_phi"], preprocessed_source_itk, preprocessed_target_itk, dirlab_id)
easyreg | easyreg-master/demo/oai_eval.py | import os
import numpy as np
import torch
import SimpleITK as sitk
from tools.module_parameters import ParameterDict
from easyreg.mermaid_net import MermaidNet as model
from easyreg.utils import resample_image
import mermaid.utils as py_utils
def resize_img(img, img_after_resize=None, is_mask=False):
    """
    Resample a SimpleITK image onto a new voxel grid.

    :param img: sitk image to resample
    :param img_after_resize: list, desired output size; it is compared against
        np.flipud(img.GetSize()), so it appears to be in numpy (z, y, x) order
        -- TODO confirm against the callers' convention
    :param is_mask: bool, use nearest-neighbor interpolation (preserves label
        values) instead of linear
    :return: tuple of (resampled sitk image, per-axis resize factor)
    """
    img_sz = img.GetSize()
    if img_after_resize is not None:
        # no-op kept for symmetry with the else branch
        img_after_resize = img_after_resize
    else:
        # default: keep the original size (the function becomes a pass-through)
        img_after_resize = img_sz
    resize_factor = np.array(img_after_resize) / np.flipud(img_sz)
    # spacing uses (n-1) intervals rather than n samples
    spacing_factor = (np.array(img_after_resize) - 1) / (np.flipud(img_sz) - 1)
    resize = not all([factor == 1 for factor in resize_factor])
    if resize:
        resampler = sitk.ResampleImageFilter()
        dimension = 3
        factor = np.flipud(resize_factor)
        affine = sitk.AffineTransform(dimension)
        matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
        after_size = [round(img_sz[i] * factor[i]) for i in range(dimension)]
        after_size = [int(sz) for sz in after_size]
        # NOTE(review): spacing_factor is indexed in numpy (z, y, x) order but
        # assigned to the ITK (x, y, z) matrix diagonal; for anisotropic
        # resizes these axes look swapped -- confirm intended behavior.
        matrix[0, 0] = 1. / spacing_factor[0]
        matrix[1, 1] = 1. / spacing_factor[1]
        matrix[2, 2] = 1. / spacing_factor[2]
        affine.SetMatrix(matrix.ravel())
        resampler.SetSize(after_size)
        resampler.SetTransform(affine)
        if is_mask:
            resampler.SetInterpolator(sitk.sitkNearestNeighbor)
        else:
            resampler.SetInterpolator(sitk.sitkLinear)
        img_resampled = resampler.Execute(img)
    else:
        img_resampled = img
    return img_resampled, resize_factor
def convert_itk_to_support_deepnet(img_sitk, is_mask=False,device=torch.device("cuda:0")):
    """Resize an ITK image to the fixed network input size and return it as a
    1x1xDxHxW float tensor on *device*; image intensities (assumed in [0, 1])
    are rescaled to [-1, 1], masks are left untouched."""
    net_input_sz = [80,192,192]
    # round-trip through numpy to drop origin/spacing/direction metadata
    stripped = sitk.GetImageFromArray(sitk.GetArrayFromImage(img_sitk))
    resized, _ = resize_img(stripped, net_input_sz, is_mask=is_mask)
    arr = sitk.GetArrayFromImage(resized)
    if not is_mask:
        arr = arr * 2 - 1
    return torch.Tensor(arr.astype(np.float32))[None][None].to(device)
def convert_output_into_itk_support_format(source_itk,target_itk, l_source_itk, l_target_itk, phi,spacing):
    """Upsample a network transformation map to the target image resolution and
    warp the source image/label with it.

    :param source_itk: sitk image or None, source image to warp
    :param target_itk: sitk image, defines the output grid for the image warp
    :param l_source_itk: sitk image or None, source label to warp
    :param l_target_itk: sitk image, defines the output grid for the label warp
    :param phi: transformation map tensor in [-1, 1] coordinates, 1x3xXxYxZ
    :param spacing: spacing of the map's (downsampled) grid
    :return: tuple (upsampled map, warped image or None, warped label or None)
    """
    phi = (phi+1)/2 # here we assume the phi take the [-1,1] coordinate, usually used by deep network
    new_phi = None
    warped = None
    l_warped = None
    new_spacing = None
    if source_itk is not None:
        s = sitk.GetArrayFromImage(source_itk)
        t = sitk.GetArrayFromImage(target_itk)
        sz_t = [1, 1] + list(t.shape)
        source = torch.from_numpy(s[None][None]).to(phi.device)
        # resample the map to the target's full resolution before warping
        new_phi, new_spacing = resample_image(phi, spacing, sz_t, 1, zero_boundary=True)
        warped = py_utils.compute_warped_image_multiNC(source, new_phi, new_spacing, 1, zero_boundary=True)
    if l_source_itk is not None:
        ls = sitk.GetArrayFromImage(l_source_itk).astype(np.float32)
        lt = sitk.GetArrayFromImage(l_target_itk).astype(np.float32)
        sz_lt = [1, 1] + list(lt.shape)
        l_source = torch.from_numpy(ls[None][None]).to(phi.device)
        if new_phi is None:
            # only resample here if the image branch above did not already do it
            new_phi, new_spacing = resample_image(phi, spacing, sz_lt, 1, zero_boundary=True)
        # spline_order=0 (nearest neighbor) preserves discrete label values
        l_warped = py_utils.compute_warped_image_multiNC(l_source, new_phi, new_spacing, 0, zero_boundary=True)
    return new_phi, warped, l_warped
def predict(source_itk, target_itk,source_mask_itk=None, target_mask_itk=None,setting_path="", model_path="",device=torch.device("cuda:0")):
    """Register *source_itk* onto *target_itk* with a pretrained MermaidNet and
    return the inverse transformation map at the source image's resolution.

    :param source_itk: sitk image, moving image (intensities assumed normalized into [0, 1])
    :param target_itk: sitk image, target image (intensities assumed normalized into [0, 1])
    :param source_mask_itk: sitk image or None, mask of the moving image
    :param target_mask_itk: sitk image or None, mask of the target image
    :param setting_path: str, path of the json task-settings file for the network
    :param model_path: str, path of the pretrained model checkpoint
    :param device: torch.device the network and inputs are placed on
    :return: numpy array, inverse transformation map (3 x X x Y x Z)
    """
    opt = ParameterDict()
    opt.load_JSON(setting_path)
    source = convert_itk_to_support_deepnet(source_itk,device=device)
    target = convert_itk_to_support_deepnet(target_itk, device=device)
    # bug fix: the masks previously fell back to the default cuda:0 device,
    # mixing devices whenever a different device was requested
    source_mask = convert_itk_to_support_deepnet(source_mask_itk,is_mask=True,device=device) if source_mask_itk is not None else None
    target_mask = convert_itk_to_support_deepnet(target_mask_itk,is_mask=True,device=device) if target_mask_itk is not None else None
    network = model(img_sz=[80,192,192],opt=opt)
    network.load_pretrained_model(model_path)
    network.to(device)
    network.train(False)
    with torch.no_grad():
        warped, composed_map, _ = network.forward(source, target, source_mask, target_mask)
        composed_inv_map = network.get_inverse_map(use_01=False)
        # spacing of the network's (downsampled) output grid in normalized coordinates
        spacing = 1./(np.array(warped.shape[2:])-1)
    del network  # free GPU memory before the full-resolution resampling below
    full_inv_composed_map, full_inv_warped,l_full_inv_warped = convert_output_into_itk_support_format(target_itk,source_itk, target_mask_itk, source_mask_itk, composed_inv_map,spacing)
    full_inv_composed_map = full_inv_composed_map.detach().cpu().squeeze().numpy()
    return full_inv_composed_map
def get_file_name(img_path):
    """Return the base name of *img_path* with every dotted suffix removed."""
    base_name = os.path.split(img_path)[-1]
    return base_name.split(".")[0]
if __name__ == "__main__":
    """
    Demo: register a source OAI knee image onto a target and compute the
    inverse transformation map.

    input:
        here we assume the input have been preprocessed (normalized into [0,1])
        source path
        target path
        source_mask_path (optional, empty string disables it)
        target_mask_path (optional, empty string disables it)
    output: the inverse transformation map at the source resolution
    """
    # NOTE(review): all paths are cluster-specific and hardcoded.
    source_path = "/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_rescaled/9357383_20040927_SAG_3D_DESS_LEFT_016610250606_image.nii.gz"
    target_path = "/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_rescaled/9003406_20060322_SAG_3D_DESS_LEFT_016610899303_image.nii.gz"
    source_mask_path = "/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_rescaled/9357383_20040927_SAG_3D_DESS_LEFT_016610250606_label_all.nii.gz"
    target_mask_path = "/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_rescaled/9003406_20060322_SAG_3D_DESS_LEFT_016610899303_label_all.nii.gz"
    setting_path = "./demo_settings/mermaid/eval_network_vsvf/cur_task_setting.json"
    model_path = "./demo_saved_models/mermaid/eval_network_vsvf/model"
    source_itk = sitk.ReadImage(source_path)
    target_itk = sitk.ReadImage(target_path)
    source_mask_itk = sitk.ReadImage(source_mask_path) if len(source_mask_path) else None
    target_mask_itk = sitk.ReadImage(target_mask_path) if len(target_mask_path) else None
    full_inv_composed_map = predict(source_itk, target_itk,source_mask_itk,target_mask_itk,setting_path, model_path=model_path)
| 7,086 | 48.559441 | 184 | py |
easyreg | easyreg-master/demo/gen_aug_samples.py | import matplotlib as matplt
matplt.use('Agg')
import sys,os
#os.environ["CUDA_VISIBLE_DEVICES"] = ''
sys.path.insert(0,os.path.abspath('..'))
sys.path.insert(0,os.path.abspath('../easyreg'))
sys.path.insert(0,os.path.abspath('.'))
import random
import torch
from mermaid.model_evaluation import evaluate_model
import mermaid.module_parameters as pars
from mermaid.utils import resample_image, compute_warped_image_multiNC, \
get_resampled_image,get_res_size_from_size, get_res_spacing_from_spacing,identity_map_multiN
import numpy as np
import SimpleITK as sitk
import nibabel as nib
from tools.image_rescale import save_image_with_given_reference
from easyreg.aug_utils import read_img_label_into_list
from easyreg.reg_data_utils import read_fname_list_from_pair_fname_txt
from easyreg.utils import gen_affine_map, get_inverse_affine_param
from glob import glob
import copy
def get_pair_list(txt_pth):
    """Read the per-line image/label path lists stored in *txt_pth*."""
    return read_img_label_into_list(txt_pth)
def get_init_weight_list(folder_path):
    """Read the pair-weight path list stored under *folder_path*."""
    weight_txt = os.path.join(folder_path, 'pair_weight_path_list.txt')
    return read_img_label_into_list(weight_txt)
def get_setting(path,output_path,setting_name = "mermaid"):
    """Load a JSON parameter file and archive a copy of it.

    The parameters are read from *path* and a copy named
    ``<setting_name>_setting.json`` is written into *output_path*
    (created if missing) for reproducibility.
    """
    params = pars.ParameterDict()
    params.load_JSON(path)
    os.makedirs(output_path, exist_ok=True)
    archived_path = os.path.join(output_path, '{}_setting.json'.format(setting_name))
    params.write_JSON(archived_path, save_int=False)
    return params
def save_deformation(phi,output_path,fname_list):
    """Save each batch entry of the deformation field *phi* as a nifti file
    named ``<fname>.nii.gz`` under *output_path* (identity affine)."""
    phi_np = phi.detach().cpu().numpy()
    for batch_idx in range(phi_np.shape[0]):
        nii = nib.Nifti1Image(phi_np[batch_idx], np.eye(4))
        nib.save(nii, os.path.join(output_path, fname_list[batch_idx] + '.nii.gz'))
def get_file_name(file_path,last_ocur=True):
    """Return a sanitized file name for *file_path*.

    With ``last_ocur`` True only the final dotted suffix is dropped; otherwise
    everything after the first dot is dropped. A remaining ``.nii`` fragment
    is removed and any leftover dots are replaced by 'd'.
    """
    base = os.path.split(file_path)[1]
    name = base.rsplit('.', 1)[0] if last_ocur else base.split('.')[0]
    name = name.replace('.nii', '')
    return name.replace('.', 'd')
class DataAug(object):
    """Base class for the augmentation drivers; loads the augmentation settings."""
    def __init__(self,aug_setting_path):
        # path of the json file holding the augmentation settings
        self.aug_setting_path = aug_setting_path
        # NOTE(review): get_setting's signature is (path, output_path, setting_name="mermaid"),
        # so "aug" here is consumed as the *output directory*, not the setting name, and the
        # archived copy is written to ./aug/mermaid_setting.json -- confirm this is intended.
        self.aug_setting = get_setting(aug_setting_path,"aug")
        # maximum number of random augmentations produced over the whole dataset
        self.max_aug_num = self.aug_setting['data_aug'][('max_aug_num',1000,"the max num of rand aug, only set when in dataset rand augmentation mode")]
class FluidAug(DataAug):
    """Base class for fluid (momentum-based) augmentation.

    Subclasses supply the input momenta; this class holds the shared settings
    and the core routine that shoots a momentum through the mermaid
    registration model to produce a deformed image/label/map.
    """
    def __init__(self,aug_setting_path,mermaid_setting_path):
        DataAug.__init__(self,aug_setting_path)
        self.mermaid_setting_path = mermaid_setting_path
        self.mermaid_setting = get_setting(mermaid_setting_path,"mermaid")
        self.init_setting()
    def init_setting(self):
        """Read all fluid-augmentation options from the loaded settings dict."""
        aug_setting = self.aug_setting
        self.K = aug_setting['data_aug']["fluid_aug"][('K',1,"the dimension of the geodeisc subspace")]
        self.task_type = aug_setting['data_aug']["fluid_aug"][('task_type',"rand_sampl/data_interp, rand_sampl: random sampling from the geodesic space, typically for dataset augmentation;"
                                                             " data_interp: interpolation between source and the target set with given time point and given weight")]
        self.compute_inverse = aug_setting['data_aug']["fluid_aug"][('compute_inverse',True,"compute the inverse map")]
        self.save_tf_map = aug_setting['data_aug']["fluid_aug"][('save_tf_map',True,"save the transformation map")]
        self.rand_w_t = True if self.task_type=="rand_sampl" else False
        self.t_aug_list= aug_setting['data_aug']["fluid_aug"]['data_interp'][('t_aug_list',[1.0],"list of number, the time points for inter-/extra-polation")]
        self.weight_list = self.aug_setting['data_aug']["fluid_aug"]['data_interp'][('weight_list',[[1.0]],"list of list, the weight for each target image, set in data_interp mode")]
        self.t_range = aug_setting['data_aug']["fluid_aug"]['rand_sampl'][('t_range',[-1,2],"the range of t inter-/extra-polation, the registration completes in unit time [0,1]")]
        self.rand_momentum_shrink_factor = self.aug_setting['data_aug']["fluid_aug"]['aug_with_random_momentum'][('rand_momentum_shrink_factor',8,"the size of random momentum is 1/rand_momentum_shrink_factor of the original image sz")]
        self.magnitude = self.aug_setting['data_aug']["fluid_aug"]['aug_with_random_momentum'][('magnitude',1.5,"the magnitude of the random momentum")]
        self.affine_back_to_original_postion = self.aug_setting['data_aug']["fluid_aug"]['aug_with_nonaffined_data'][('affine_back_to_original_postion',False,"transform the new image to the original postion")]
        self.resize_output = self.aug_setting['data_aug']["fluid_aug"][('resize_output',[-1,-1,-1],"set the resized size otherwise [-1,-1,-1]")]
    def generate_aug_data(self,*args):
        # abstract hook; implemented by FluidRand / FluidAffined / FluidNonAffined / FluidAtlas
        pass
    # NOTE: a commented-out full-resolution variant of generate_single_res
    # (forwarding the map at map_low_res_factor=1.0 for highest precision at
    # the cost of GPU memory) used to live here; see repository history.
    def generate_single_res(self,moving, l_moving, momentum, init_weight, initial_map, initial_inverse_map, fname, t_aug, output_path, moving_path):
        """Shoot one momentum to time *t_aug* and save the resulting sample.

        Saves the warped image (``<fname>_image``), the warped label if a label
        is given, and optionally the forward/inverse transformation maps.

        :param moving: 1x1xXxYxZ tensor, moving image
        :param l_moving: 1x1xXxYxZ tensor or None, moving label
        :param momentum: 1x3x... tensor or None; if None a smooth random momentum is drawn
        :param init_weight: optional per-voxel regularizer weights passed to mermaid
        :param initial_map: optional initial transformation map
        :param initial_inverse_map: optional initial inverse transformation map
        :param fname: str, output file-name stem
        :param t_aug: float, integration end time (registration completes at t=1)
        :param output_path: str, output directory
        :param moving_path: str, path of the moving image (used as the itk reference)
        """
        params = self.mermaid_setting
        params['model']['registration_model']['forward_model']['tTo'] = t_aug
        # here we assume the momentum is computed at low_resol_factor=0.5
        if momentum is not None:
            input_img_sz = [1, 1] + [int(sz * 2) for sz in momentum.shape[2:]]
        else:
            # no momentum given: draw a smooth random one (low-res noise upsampled)
            input_img_sz = [1, 1] + [int(sz/2)*2 for sz in moving.shape[2:]]
            momentum_sz_low = [1, 3] + [int(dim /self.rand_momentum_shrink_factor) for dim in input_img_sz[2:]]
            momentum_sz = [1, 3] + [int(dim / 2) for dim in input_img_sz[2:]]
            momentum = (np.random.rand(*momentum_sz_low) * 2 - 1) * self.magnitude
            mom_spacing = 1./(np.array(momentum_sz_low[2:])-1)
            momentum = torch.Tensor(momentum).cuda()
            momentum, _ = resample_image(momentum,mom_spacing,momentum_sz,spline_order=1,zero_boundary=True)
        if self.resize_output != [-1, -1, -1]:
            # resample the momentum onto the requested output grid (at half resolution)
            momentum_sz = [1, 3] + [int(dim / 2) for dim in self.resize_output]
            mom_spacing = 1./(np.array(momentum_sz[2:])-1)
            momentum, _ = resample_image(momentum,mom_spacing,momentum_sz,spline_order=1,zero_boundary=True)
            input_img_sz = [1, 1] + [int(sz * 2) for sz in momentum.shape[2:]]
        org_spacing = 1.0 / (np.array(moving.shape[2:]) - 1)
        input_spacing = 1.0 / (np.array(input_img_sz[2:]) - 1)
        size_diff = not input_img_sz == list(moving.shape)
        if size_diff:
            input_img, _ = resample_image(moving, org_spacing, input_img_sz)
        else:
            input_img = moving
        # the model integrates at half resolution, so initial maps need low-res copies
        low_initial_map = None
        low_init_inverse_map = None
        if initial_map is not None:
            low_initial_map, _ = resample_image(initial_map, input_spacing, [1, 3] + list(momentum.shape[2:]))
        if initial_inverse_map is not None:
            low_init_inverse_map, _ = resample_image(initial_inverse_map, input_spacing, [1, 3] + list(momentum.shape[2:]))
        individual_parameters = dict(m=momentum, local_weights=init_weight)
        sz = np.array(input_img.shape)
        extra_info = None
        visual_param = None
        res = evaluate_model(input_img, input_img, sz, input_spacing,
                             use_map=True,
                             compute_inverse_map=self.compute_inverse,
                             map_low_res_factor=0.5,
                             compute_similarity_measure_at_low_res=False,
                             spline_order=1,
                             individual_parameters=individual_parameters,
                             shared_parameters=None, params=params, extra_info=extra_info, visualize=False,
                             visual_param=visual_param, given_weight=False,
                             init_map=initial_map, lowres_init_map=low_initial_map,
                             init_inverse_map=initial_inverse_map,lowres_init_inverse_map=low_init_inverse_map)
        phi = res[1]
        phi_new = phi
        if size_diff:
            # bring the map back to the moving image's resolution before warping
            phi_new, _ = resample_image(phi, input_spacing, [1, 3] + list(moving.shape[2:]))
        warped = compute_warped_image_multiNC(moving, phi_new, org_spacing, spline_order=1, zero_boundary=True)
        if initial_inverse_map is not None and self.affine_back_to_original_postion:
            # zero boundary requires warping in two separate interpolation steps
            warped = compute_warped_image_multiNC(warped, initial_inverse_map, org_spacing, spline_order=1, zero_boundary=True)
            phi_new = compute_warped_image_multiNC(phi_new, initial_inverse_map, org_spacing, spline_order=1)
        save_image_with_given_reference(warped, [moving_path], output_path, [fname + '_image'])
        if l_moving is not None:
            # we assume the label doesn't lie at the boundary; spline_order=0 keeps label values
            l_warped = compute_warped_image_multiNC(l_moving, phi_new, org_spacing, spline_order=0, zero_boundary=True)
            save_image_with_given_reference(l_warped, [moving_path], output_path, [fname + '_label'])
        if self.save_tf_map:
            save_deformation(phi_new, output_path, [fname + '_phi_map'])
        if self.compute_inverse:
            phi_inv = res[2]
            inv_phi_new = phi_inv
            if self.affine_back_to_original_postion:
                print("Cannot compute the inverse map when affine back to the source image position")
                return
            if size_diff:
                inv_phi_new, _ = resample_image(phi_inv, input_spacing, [1, 3] + list(moving.shape[2:]))
            save_deformation(inv_phi_new, output_path, [fname + '_inv_map'])
class FluidRand(FluidAug):
    """Augmentation by shooting random smooth momenta from each input image.

    Each input line provides a moving image and (optionally) its label; random
    momenta and random time points are drawn by generate_single_res.
    """
    def __init__(self,aug_setting_path,mermaid_setting_path):
        FluidAug.__init__(self,aug_setting_path,mermaid_setting_path)

    def get_input(self,moving_path_list,fname, init_weight_path_list=None):
        """Load one input line: [moving_path, label_path or None].

        :param moving_path_list: list, paths of the moving image and its label (None if absent)
        :param fname: str or None, output name stem; derived from the moving path if None
        :param init_weight_path_list: unused here; kept for interface symmetry
        :return: tuple (moving tensor, label tensor or None, name stem)
        """
        fr_sitk = lambda x: torch.Tensor(sitk.GetArrayFromImage(sitk.ReadImage(x))).cuda()
        moving = fr_sitk(moving_path_list[0])[None][None]
        l_moving = None
        if moving_path_list[1] is not None:
            l_moving = fr_sitk(moving_path_list[1])[None][None]
        # bug fix: moving_name was only assigned when fname is None, raising
        # UnboundLocalError whenever a file-name list was provided
        moving_name = get_file_name(moving_path_list[0]) if fname is None else fname
        if self.resize_output != [-1.,-1,-1]:
            moving,_ = resample_image(moving,[1,1,1],desiredSize=[1,1]+self.resize_output,spline_order=1,zero_boundary=True)
            if moving_path_list[1] is not None:
                l_moving,_ = resample_image(l_moving,[1,1,1],desiredSize=[1,1]+self.resize_output,spline_order=0,zero_boundary=True)
        return moving, l_moving, moving_name

    def generate_aug_data(self,moving_path_list,fname_list, init_weight_path_list, output_path):
        """Generate ~max_aug_num random samples spread evenly over the inputs.

        :param moving_path_list: list of input lines (see get_input)
        :param fname_list: list of output name stems or None
        :param init_weight_path_list: unused here; kept for interface symmetry
        :param output_path: str, output directory
        """
        max_aug_num = self.max_aug_num
        t_range = self.t_range
        t_span = t_range[1]-t_range[0]
        num_pair = len(moving_path_list)
        for i in range(num_pair):
            moving_path = moving_path_list[i][0]
            fname = fname_list[i] if fname_list is not None else None
            moving, l_moving, moving_name = self.get_input(moving_path_list[i],fname, None)
            num_aug = round(max_aug_num / num_pair)
            for _ in range(num_aug):
                # random time point in t_range; the momentum itself is drawn downstream
                t_aug = random.random() * t_span +t_range[0]
                momentum = None
                fname = moving_name + '_{:.4f}_t_{:.2f}'.format(random.random(), t_aug)
                self.generate_single_res(moving, l_moving, momentum, None, None,None, fname, t_aug, output_path, moving_path)
class FluidAffined(FluidAug):
    """Fluid augmentation for pre-affinely-aligned data.

    Each input line provides a moving image, its label (or None) and a set of
    precomputed momenta towards different targets; new samples are generated
    by shooting along convex combinations of these momenta (random weights in
    rand_sampl mode, user-given weights in data_interp mode).
    """
    def __init__(self,aug_setting_path,mermaid_setting_path):
        FluidAug.__init__(self,aug_setting_path,mermaid_setting_path)

    def get_input(self,moving_momentum_path_list,fname_list, init_weight_path_list):
        """Load one input line: [moving, label or None, momentum_1, momentum_2, ...].

        :return: tuple (moving, label or None, momentum list, init-weight list
            or None, moving name stem, target name stems)
        """
        fr_sitk = lambda x: torch.Tensor(sitk.GetArrayFromImage(sitk.ReadImage(x))).cuda()
        moving = fr_sitk(moving_momentum_path_list[0])[None][None]
        l_moving = None
        if moving_momentum_path_list[1] is not None:
            l_moving = fr_sitk(moving_momentum_path_list[1])[None][None]
        # momenta are stored channel-last on disk; permute into 1x3xXxYxZ
        momentum_list = [((fr_sitk(path)).permute(3,2,1,0))[None] for path in moving_momentum_path_list[2:]]
        if init_weight_path_list is not None:
            init_weight_list = [[fr_sitk(path) for path in init_weight_path_list]]
        else:
            init_weight_list = None
        if fname_list is None:
            moving_name = get_file_name(moving_momentum_path_list[0])
            target_name_list = [get_file_name(path) for path in moving_momentum_path_list[2:]]
            target_name_list = [fname.replace("_0000_Momentum", '') for fname in target_name_list]
        else:
            moving_name = fname_list[0]
            target_name_list = fname_list[1:]
        if self.resize_output != [-1.,-1,-1]:
            moving,_ = resample_image(moving,[1,1,1],desiredSize=[1,1]+self.resize_output,spline_order=1,zero_boundary=True)
            if moving_momentum_path_list[1] is not None:
                l_moving,_ = resample_image(l_moving,[1,1,1],desiredSize=[1,1]+self.resize_output,spline_order=0,zero_boundary=True)
        return moving, l_moving, momentum_list, init_weight_list, moving_name,target_name_list

    def generate_aug_data(self,moving_momentum_path_list, fname_list,init_weight_path_list, output_path):
        """Generate augmented samples for every input line.

        In rand_sampl mode, K momenta are sampled per output and combined with
        random normalized weights at a random time; in data_interp mode the
        configured t_aug_list/weight_list are used over the full momentum set.
        """
        max_aug_num = self.max_aug_num
        rand_w_t = self.rand_w_t
        t_range = self.t_range
        K = self.K
        t_span = t_range[1]-t_range[0]
        num_pair = len(moving_momentum_path_list)
        for i in range(num_pair):
            moving_path = moving_momentum_path_list[i][0]
            fname = fname_list[i] if fname_list is not None else None
            moving, l_moving, momentum_list, init_weight_list, moving_name, target_name_list = self.get_input(
                moving_momentum_path_list[i],fname,init_weight_path_list[i] if init_weight_path_list else None)
            num_aug = round(max_aug_num / num_pair) if rand_w_t else 1
            for _ in range(num_aug):
                num_momentum = len(momentum_list)
                if rand_w_t:
                    t_aug_list = [random.random() * t_span +t_range[0]]
                    weight = np.array([random.random() for _ in range(K)])
                    weight_list = [weight / np.sum(weight)]
                    selected_index = random.sample(list(range(num_momentum)), K)
                else:
                    t_aug_list = self.t_aug_list
                    weight_list = self.weight_list
                    K = num_momentum
                    selected_index = list(range(num_momentum))
                    # bug fix: this check only applies to user-supplied weights in
                    # data_interp mode; it previously ran in rand_sampl mode too and
                    # aborted valid sampling whenever K != num_momentum
                    for weight in weight_list:
                        assert len(weight)==num_momentum,"In data-interp mode, the weight should be the same size of the momentum set"
                for t_aug in t_aug_list:
                    for weight in weight_list:
                        # combine the selected momenta and build a descriptive file name
                        momentum = torch.zeros_like(momentum_list[0])
                        fname = moving_name + '_'
                        suffix =""
                        for k in range(K):
                            momentum += weight[k] * momentum_list[selected_index[k]]
                            fname += target_name_list[selected_index[k]] + '_'
                            suffix += '{:.4f}_'.format(weight[k])
                        fname = fname + suffix +'t_{:.2f}'.format(t_aug)
                        fname = fname.replace('.', 'd')
                        init_weight = None
                        if init_weight_list is not None:
                            init_weight = random.sample(init_weight_list, 1)
                        self.generate_single_res(moving, l_moving, momentum, init_weight, None,None, fname, t_aug, output_path, moving_path)
class FluidNonAffined(FluidAug):
def __init__(self,aug_setting_path,mermaid_setting_path):
FluidAug.__init__(self,aug_setting_path,mermaid_setting_path)
def read_affine_param_and_output_map(self,affine_param_path,img_sz):
affine_param = np.load(affine_param_path)
affine_param = torch.Tensor(affine_param)[None].cuda()
affine_map = gen_affine_map(affine_param,img_sz)
inverse_affine_param = get_inverse_affine_param(affine_param)
inverse_affine_map = gen_affine_map(inverse_affine_param,img_sz)
affine_map = (affine_map+1.)/2
inverse_affine_map = (inverse_affine_map+1.)/2
return affine_map, inverse_affine_map
def get_input(self,moving_momentum_path_list,fname_list, init_weight_path_list):
"""
each line includes path of moving, path of moving label(None if not exists), path of mom_1,...mom_m, affine_1....affine_m
"""
fr_sitk = lambda x: torch.Tensor(sitk.GetArrayFromImage(sitk.ReadImage(x))).cuda()
moving = fr_sitk(moving_momentum_path_list[0])[None][None]
l_moving = None
if moving_momentum_path_list[1] is not None:
l_moving = fr_sitk(moving_momentum_path_list[1])[None][None]
num_m = int((len(moving_momentum_path_list)-2)/2)
momentum_list =[fr_sitk(path).permute(3,2,1,0)[None] for path in moving_momentum_path_list[2:num_m+2]]
#affine_list =[fr_sitk(path).permute(3,2,1,0)[None] for path in moving_momentum_path_list[num_m+2:]]
affine_forward_inverse_list =[self.read_affine_param_and_output_map(path,moving.shape[2:]) for path in moving_momentum_path_list[num_m+2:]]
affine_list = [forward_inverse[0] for forward_inverse in affine_forward_inverse_list]
inverse_affine_list = [forward_inverse[1] for forward_inverse in affine_forward_inverse_list]
if init_weight_path_list is not None:
init_weight_list=[[fr_sitk(path) for path in init_weight_path_list]]
else:
init_weight_list=None
if fname_list is None:
moving_name = get_file_name(moving_momentum_path_list[0])
target_name_list = [get_file_name(path) for path in moving_momentum_path_list[2:num_m + 2]]
target_name_list = [fname.replace("_0000_Momentum", '') for fname in target_name_list]
else:
moving_name = fname_list[0]
target_name_list = fname_list[1:]
if self.resize_output != [-1.,-1,-1]:
moving,_ = resample_image(moving,[1,1,1],desiredSize=[1,1]+self.resize_output,spline_order=1,zero_boundary=True)
if moving_momentum_path_list[1] is not None:
l_moving,_ = resample_image(l_moving,[1,1,1],desiredSize=[1,1]+self.resize_output,spline_order=0,zero_boundary=True)
return moving, l_moving, momentum_list, init_weight_list, affine_list,inverse_affine_list, moving_name, target_name_list
    def generate_aug_data(self,moving_momentum_path_list,fname_list, init_weight_path_list, output_path):
        """
        Generate augmented samples for every input pair.

        For each source image either a random time point on a randomly chosen
        geodesic is sampled (rand_w_t mode), or the preset time points of a
        single geodesic are used (data-interpolation mode); the actual shooting
        and warping is delegated to generate_single_res.

        :param moving_momentum_path_list: list over pairs; each item follows the layout documented in get_input
        :param fname_list: optional list of name lists, one per pair
        :param init_weight_path_list: optional RDMM pre-weight paths per pair, or None
        :param output_path: folder where the augmented images are written
        """
        max_aug_num = self.max_aug_num
        rand_w_t = self.rand_w_t
        t_range = self.t_range
        K = 1 # for non-affined case, the K is set to 1
        t_span = t_range[1] - t_range[0]
        num_pair = len(moving_momentum_path_list)
        for i in range(num_pair):
            moving_path = moving_momentum_path_list[i][0]
            fname = fname_list[i] if fname_list is not None else None
            moving, l_moving, momentum_list, init_weight_list, affine_list,inverse_affine_list, moving_name, target_name_list = self.get_input(
                moving_momentum_path_list[i], fname,init_weight_path_list[i] if init_weight_path_list else None)
            # spread the augmentation budget evenly over the input pairs (at least one each)
            num_aug = max(round(max_aug_num / num_pair),1) if rand_w_t else 1
            for _ in range(num_aug):
                num_momentum = len(momentum_list)
                if rand_w_t:
                    # random-sampling mode: draw a random time in t_range and one momentum index
                    t_aug_list = [random.random() * t_span + t_range[0]]
                    selected_index = random.sample(list(range(num_momentum)), K)
                else:
                    # data-interpolation mode: use preset time points on a single geodesic
                    if num_momentum >1:
                        print("for non-affined image and for data_interp mode, the size of the momentum set should be 1")
                    t_aug_list = self.t_aug_list
                    selected_index = [0]
                for t_aug in t_aug_list:
                    momentum = momentum_list[selected_index[0]]
                    affine = affine_list[selected_index[0]]
                    inverse_affine = inverse_affine_list[selected_index[0]]
                    fname = moving_name + "_" + target_name_list[selected_index[0]] + '_t_{:.2f}'.format(t_aug)
                    fname = fname.replace('.', 'd')  # dots in filenames would confuse downstream name parsing
                    init_weight = None
                    if init_weight_list is not None:
                        # NOTE(review): random.sample returns a one-element list here, not the element itself -- confirm consumers expect a list
                        init_weight = random.sample(init_weight_list, 1)
                    self.generate_single_res(moving, l_moving, momentum, init_weight, affine,inverse_affine, fname, t_aug, output_path, moving_path)
class FluidAtlas(FluidAug):
    """Fluid augmentation through a common atlas space.

    Each image is first mapped into atlas space using its precomputed
    image-to-atlas momentum; augmented samples are then produced by shooting
    from there along randomly weighted combinations of atlas-to-image momenta.
    """
    def __init__(self,aug_setting_path,mermaid_setting_path):
        FluidAug.__init__(self,aug_setting_path,mermaid_setting_path)
        # folder holding the precomputed image -> atlas momenta
        self.to_atlas_folder = self.aug_setting['data_aug']["fluid_aug"]["aug_with_atlas"][
            ('to_atlas_folder', None, "the folder containing the image to atlas transformation")]
        # folder holding the precomputed atlas -> image momenta
        self.atlas_to_folder = self.aug_setting['data_aug']["fluid_aug"]["aug_with_atlas"][
            ('atlas_to_folder', None, "the folder containing the atlas to image momentum")]
    def get_input(self,moving_momentum_path_list,moving_name, init_weight_path_list):
        """
        Load the moving image (and optional label) onto the GPU.

        each line include the path of moving, the path of label(None if not exists)
        :param moving_momentum_path_list: [moving_path, label_path_or_None]
        :param moving_name: optional name for the moving image; parsed from the path if None
        :param init_weight_path_list: unused here (atlas mode does not support init weights)
        :return: moving, l_moving, moving_name
        """
        fr_sitk = lambda x: torch.Tensor(sitk.GetArrayFromImage(sitk.ReadImage(x))).cuda()
        moving = fr_sitk(moving_momentum_path_list[0])[None][None]
        l_moving = None
        if moving_momentum_path_list[1] is not None:
            l_moving = fr_sitk(moving_momentum_path_list[1])[None][None]
        if moving_name is None:
            moving_name = get_file_name(moving_momentum_path_list[0])
        if self.resize_output != [-1.,-1,-1]:
            # trilinear for the image, nearest for the label
            moving,_ = resample_image(moving,[1,1,1],desiredSize=[1,1]+self.resize_output,spline_order=1,zero_boundary=True)
            if moving_momentum_path_list[1] is not None:
                l_moving,_ = resample_image(l_moving,[1,1,1],desiredSize=[1,1]+self.resize_output,spline_order=0,zero_boundary=True)
        return moving, l_moving,moving_name
    def generate_aug_data(self,path_list,fname_list, init_weight_path_list, output_path):
        """
        here we use the low-interface of mermaid to get efficient low-res- propagration (avod saving phi and inverse phi as well as the precision loss from unnecessary upsampling and downsampling
        ) which provide high precision in maps
        """
        def create_mermaid_model(mermaid_json_pth, img_sz, compute_inverse=True):
            # build a mermaid registration unit plus low-res / full-res identity maps
            # NOTE(review): the compute_inverse argument is not used; compute_inverse_map is hard-coded True below -- confirm intended
            import mermaid.model_factory as py_mf
            spacing = 1. / (np.array(img_sz[2:]) - 1)
            params = pars.ParameterDict()
            params.load_JSON(mermaid_json_pth)  # ''../easyreg/cur_settings_svf.json')
            model_name = params['model']['registration_model']['type']
            params.print_settings_off()
            mermaid_low_res_factor = 0.5
            lowResSize = get_res_size_from_size(img_sz, mermaid_low_res_factor)
            lowResSpacing = get_res_spacing_from_spacing(spacing, img_sz, lowResSize)
            ##
            mf = py_mf.ModelFactory(img_sz, spacing, lowResSize, lowResSpacing)
            model, criterion = mf.create_registration_model(model_name, params['model'],
                                                            compute_inverse_map=True)
            lowres_id = identity_map_multiN(lowResSize, lowResSpacing)
            lowResIdentityMap = torch.from_numpy(lowres_id).cuda()
            _id = identity_map_multiN(img_sz, spacing)
            identityMap = torch.from_numpy(_id).cuda()
            mermaid_unit_st = model.cuda()
            mermaid_unit_st.associate_parameters_with_module()
            return mermaid_unit_st, criterion, lowResIdentityMap, lowResSize, lowResSpacing, identityMap, spacing
        def _set_mermaid_param(mermaid_unit, m):
            # install the momentum as the model parameter before shooting
            mermaid_unit.m.data = m
        def _do_mermaid_reg(mermaid_unit, low_phi, m, low_s=None, low_inv_phi=None):
            # shoot the given momentum forward (no gradients needed at augmentation time)
            with torch.no_grad():
                _set_mermaid_param(mermaid_unit, m)
                low_phi = mermaid_unit(low_phi, low_s, phi_inv=low_inv_phi)
            return low_phi
        def get_momentum_name(momentum_path):
            # strip the momentum-file suffix to recover the image name
            fname = get_file_name(momentum_path)
            fname = fname.replace("_0000_Momentum", '')
            return fname
        max_aug_num = self.max_aug_num
        rand_w_t = self.rand_w_t
        t_range = self.t_range
        t_span = t_range[1]-t_range[0]
        K = self.K
        num_pair = len(path_list)
        assert init_weight_path_list is None, "init weight has not supported yet"
        # load all momentums for atlas to images
        read_image = lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))
        # atlas->image momenta start with "atlas"; image->atlas momenta do not
        atlas_to_momentum_path_list = list(filter(lambda x: "Momentum" in x and get_file_name(x).find("atlas") == 0,
                                                  glob(os.path.join(self.atlas_to_folder, "*nii.gz"))))
        to_atlas_momentum_path_list = list(filter(lambda x: "Momentum" in x and get_file_name(x).find("atlas") != 0,
                                                  glob(os.path.join(self.to_atlas_folder, "*nii.gz"))))
        atlas_to_momentum_list = [torch.Tensor(read_image(atlas_momentum_pth).transpose()[None]).cuda() for atlas_momentum_pth
                                  in atlas_to_momentum_path_list]
        to_atlas_momentum_list = [torch.Tensor(read_image(atlas_momentum_pth).transpose()[None]).cuda() for atlas_momentum_pth
                                  in to_atlas_momentum_path_list]
        moving_example = read_image(path_list[0][0])
        img_sz = list(moving_example.shape)
        # NOTE(review): mermaid_setting_path here is the module/global name (set in __main__),
        # not an attribute of self -- confirm this is intended
        mermaid_unit_st, criterion, lowResIdentityMap, lowResSize, lowResSpacing, identityMap, spacing = create_mermaid_model(
            mermaid_setting_path, [1, 1] + img_sz, self.compute_inverse)
        for i in range(num_pair):
            fname = fname_list[i] if fname_list is not None else None
            moving, l_moving,moving_name = self.get_input(path_list[i], fname, None)
            # get the transformation to atlas, which should simply load the transformation map
            low_moving = get_resampled_image(moving, None, lowResSize, 1, zero_boundary=True,
                                             identity_map=lowResIdentityMap)
            init_map = lowResIdentityMap.clone()
            init_inverse_map = lowResIdentityMap.clone()
            # find this image's own image->atlas momentum by name match
            index = list(filter(lambda x: moving_name in x, to_atlas_momentum_path_list))[0]
            index = to_atlas_momentum_path_list.index(index)
            # here we only interested in forward the map, so the moving image doesn't affect
            mermaid_unit_st.integrator.cparams['tTo'] = 1.0
            low_phi_to_atlas, low_inverse_phi_to_atlas = _do_mermaid_reg(mermaid_unit_st, init_map,
                                                                         to_atlas_momentum_list[index], low_moving,
                                                                         low_inv_phi=init_inverse_map)
            num_aug = max(round(max_aug_num / num_pair),1) if rand_w_t else 1
            for _ in range(num_aug):
                num_momentum = len(atlas_to_momentum_list)
                if rand_w_t:
                    # draw a random time point and K normalized random weights over K momenta
                    t_aug_list = [random.random() * t_span + t_range[0]]
                    weight = np.array([random.random() for _ in range(K)])
                    weight_list = [weight / np.sum(weight)]
                    selected_index = random.sample(list(range(num_momentum)), K)
                else:
                    raise ValueError("In atlas augmentation mode, the data interpolation is disabled")
                for t_aug in t_aug_list:
                    if t_aug ==0:
                        # t=0 would reproduce the atlas-space image; skip
                        continue
                    for weight in weight_list:
                        # blend the selected atlas->image momenta with the sampled weights
                        momentum = torch.zeros_like(atlas_to_momentum_list[0])
                        fname = moving_name + "_to_"
                        suffix = ""
                        for k in range(K):
                            momentum += weight[k] * atlas_to_momentum_list[selected_index[k]]
                            fname += get_momentum_name(atlas_to_momentum_path_list[selected_index[k]]) + '_'
                            suffix += '{:.4f}_'.format(weight[k])
                        fname = fname + suffix + 't_{:.2f}'.format(t_aug)
                        fname = fname.replace('.', 'd')  # dots would confuse downstream name parsing
                        # shoot from atlas space for the sampled time, continuing from phi_to_atlas
                        mermaid_unit_st.integrator.cparams['tTo'] = t_aug
                        low_phi_atlas_to, low_inverse_phi_atlas_to = _do_mermaid_reg(mermaid_unit_st, low_phi_to_atlas.clone(),
                                                                                     momentum, low_moving,
                                                                                     low_inv_phi=low_inverse_phi_to_atlas.clone())
                        # upsample the low-res maps back to full resolution
                        foward_map = get_resampled_image(low_phi_atlas_to, lowResSpacing, [1, 3] + img_sz, 1,
                                                         zero_boundary=False,
                                                         identity_map=identityMap)
                        inverse_map = get_resampled_image(low_inverse_phi_atlas_to, lowResSpacing, [1, 3] + img_sz, 1,
                                                          zero_boundary=False,
                                                          identity_map=identityMap)
                        warped = compute_warped_image_multiNC(moving, foward_map, spacing, spline_order=1, zero_boundary=True)
                        if l_moving is not None:
                            l_warped = compute_warped_image_multiNC(l_moving, foward_map, spacing, spline_order=0,
                                                                    zero_boundary=True)
                            save_image_with_given_reference(l_warped, [path_list[i][0]], output_path, [fname + '_label'])
                        save_image_with_given_reference(warped, [path_list[i][0]], output_path, [fname + '_image'])
                        if self.save_tf_map:
                            if self.compute_inverse:
                                # save_deformation(foward_map, output_path, [fname + '_phi'])
                                save_deformation(inverse_map, output_path, [fname + '_inv_phi'])
class RandomBSplineTransform(object):
    """
    Apply random BSpline Transformation to a 3D image
    check https://itk.org/Doxygen/html/classitk_1_1BSplineTransform.html for details of BSpline Transform
    """
    def __init__(self,mesh_size=(3,3,3), bspline_order=2, deform_scale=1.0, ratio=0.5, interpolator=sitk.sitkLinear,
                 random_mode = 'Normal'):
        """
        :param mesh_size: number of bspline control-mesh cells per dimension
        :param bspline_order: order of the bspline basis
        :param deform_scale: scale of the random control-point displacements
        :param ratio: probability of actually deforming a sample
        :param interpolator: sitk interpolator used for the image (label always uses nearest neighbor)
        :param random_mode: 'Normal' (zero-mean gaussian) or 'Uniform' ([0, deform_scale)) displacements
        """
        self.mesh_size = mesh_size
        self.bspline_order = bspline_order
        self.deform_scale = deform_scale
        self.ratio = ratio # control the probability of conduct transform
        self.interpolator = interpolator
        self.random_mode = random_mode

    def resample(self,image, transform, interpolator=sitk.sitkBSpline, default_value=0.0):
        """Resample a transformed image"""
        reference_image = image
        return sitk.Resample(image, reference_image, transform,
                             interpolator, default_value)

    def __call__(self, sample):
        """Deform sample['image'] / sample['label'] with probability self.ratio; otherwise return it unchanged."""
        random_state = np.random.RandomState()
        if np.random.rand(1)[0] < self.ratio:
            img_tm, seg_tm = sample['image'], sample['label']
            # deep-copy the inputs so the original sample is never mutated
            img = sitk.GetImageFromArray(sitk.GetArrayFromImage(img_tm).copy())
            img.CopyInformation(img_tm)
            seg = sitk.GetImageFromArray(sitk.GetArrayFromImage(seg_tm).copy())
            seg.CopyInformation(seg_tm)
            # initialize a bspline transform
            bspline = sitk.BSplineTransformInitializer(img, self.mesh_size, self.bspline_order)
            # generate random displacement for control points, the deformation is scaled by deform_scale
            if self.random_mode == 'Normal':
                control_point_displacements = random_state.normal(0, self.deform_scale/2, len(bspline.GetParameters()))
            elif self.random_mode == 'Uniform':
                control_point_displacements = random_state.random(len(bspline.GetParameters())) * self.deform_scale
            else:
                # fix: an unknown mode previously fell through and crashed later
                # with a NameError on the undefined displacement array
                raise ValueError("unknown random_mode '{}', expected 'Normal' or 'Uniform'".format(self.random_mode))
            #control_point_displacements[0:int(len(control_point_displacements) / 3)] = 0 # remove z displacement
            bspline.SetParameters(control_point_displacements)
            # deform and resample image
            img_trans = self.resample(img, bspline, interpolator=self.interpolator, default_value=0.01)
            seg_trans = self.resample(seg, bspline, interpolator=sitk.sitkNearestNeighbor, default_value=0)
            new_sample = {}
            new_sample['image'] = img_trans
            new_sample['label'] = seg_trans
        else:
            new_sample = sample
        return new_sample
class BsplineAug(DataAug):
    """Random bspline-based data augmentation (non-fluid baseline)."""
    def __init__(self, aug_setting_path):
        """
        :param aug_setting_path: path of the augmentation setting json
        """
        DataAug.__init__(self, aug_setting_path)
        self.mesh_size_list = self.aug_setting['data_aug']["bspline_aug"][("mesh_size_list",[[10,10,10]],"list of mesh size,"
            " e.g., [[10,10,10],[20,20,20]], for each augmentation, a setting will be sampled from the two")]
        self.deform_scale_list = self.aug_setting['data_aug']["bspline_aug"][("deform_scale_list",[3],"list of mesh size, "
            "e.g., [2,3], should has one-to-one correspondence with the mesh_size_list for each augmentation, a setting will be sampled from the two",)]
        self.aug_ratio = self.aug_setting['data_aug']["bspline_aug"][("aug_ratio",0.95,
            "chance to deform the image, i.e., 0.5 refers to ratio of the deformed images and the non-deformed (original) image")]
        # mesh sizes and deformation scales are paired one-to-one
        assert len(self.mesh_size_list) == len(self.deform_scale_list)

    def get_input(self,moving_path_list,fname_list):
        """Read (image, label) sitk pairs; derive file names from paths if none are given."""
        moving = [sitk.ReadImage(pth[0]) for pth in moving_path_list]
        l_moving = [sitk.ReadImage(pth[1]) for pth in moving_path_list]
        if fname_list is None:
            fname_list = [get_file_name(pth[0]) for pth in moving_path_list]
        return moving, l_moving, fname_list

    def generate_aug_data(self, moving_path_list,fname_list, output_path):
        """
        Generate augmented samples by applying a randomly picked bspline setting to each pair.

        :param moving_path_list: list of (image_path, label_path) pairs
        :param fname_list: optional list of output name stems
        :param output_path: folder where augmented image/label niftis are written
        """
        num_pair = len(moving_path_list)
        # fix: guarantee at least one augmentation per pair, consistent with the
        # fluid augmenters (previously int(...) could truncate to 0 and emit nothing)
        num_aug = max(int(self.max_aug_num / num_pair), 1)
        moving_list, l_moving_list, fname_list = self.get_input(moving_path_list,fname_list)
        # one transform instance per (mesh_size, deform_scale) setting
        bspline_func_list = [RandomBSplineTransform(mesh_size=self.mesh_size_list[i], bspline_order=2, deform_scale=self.deform_scale_list[i], ratio=self.aug_ratio)
                             for i in range(len(self.mesh_size_list))]
        for i in range(num_pair):
            sample = {'image': moving_list[i], 'label': l_moving_list[i]}
            for _ in range(num_aug):
                bspline_func = random.sample(bspline_func_list, 1)
                aug_sample = bspline_func[0](sample)
                # a random suffix disambiguates repeated augmentations of the same source
                fname = fname_list[i] + '_{:.4f}'.format(random.random())
                fname = fname.replace('.', 'd')
                sitk.WriteImage(aug_sample['image'], os.path.join(output_path, fname + '_image.nii.gz'))
                sitk.WriteImage(aug_sample['label'], os.path.join(output_path, fname + '_label.nii.gz'))
def generate_aug_data(moving_momentum_path_list, fname_list,init_weight_path_list,output_path, mermaid_setting_path,fluid_mode,aug_setting_path,fluid_aug=True):
    """Dispatch to the requested augmentation strategy and run it.

    When fluid_aug is True the strategy is picked by fluid_mode; otherwise the
    bspline augmenter is used and the path list only contains image/label pairs.
    """
    if not fluid_aug:
        moving_path_list = moving_momentum_path_list
        bspline_aug = BsplineAug(aug_setting_path)
        bspline_aug.generate_aug_data(moving_path_list, fname_list, output_path)
        return
    strategy_pool = {
        'aug_with_affined_data': FluidAffined,
        'aug_with_nonaffined_data': FluidNonAffined,
        'aug_with_atlas': FluidAtlas,
        'aug_with_random_momentum': FluidRand,
    }
    if fluid_mode not in strategy_pool:
        raise ValueError("not supported mode, should be aug_with_affined_data/aug_with_nonaffined_data/aug_with_atlas/aug_with_random_momentum")
    augmenter = strategy_pool[fluid_mode](aug_setting_path, mermaid_setting_path)
    augmenter.generate_aug_data(moving_momentum_path_list, fname_list, init_weight_path_list, output_path)
if __name__ == '__main__':
    """
    Two data augmentation methods are supported
    1) fluid-based anatomical data augmentation 2) random transformation
    For fluid-based anatomical data augmentation:
    we support two task type:
    1. random sampling from the geodesic subspace
    2. data interpolation with given time point and the weight for each target image.
    And support three strategy:
    aug_with_affined_data/aug_with_nonaffined_data/aug_with_atlas
    For random transformation:
    we support bspline random transformation and fluid-based random momentum augmentation
    See the EasyReg document for more details
    For the input txt file,
    for fluid augmentation (fluid_mode: aug_with_affined_data / aug_with_nonaffined_data) : each line include a source image path, source label path (None if not exist), N momentum paths that register to N target images
    for fluid augmentation (fluid_mode: aug_with_atlas / aug_with_random_momentum): each line include a source image path, source_label path (None if not exist)
    for bspline augmentation : each line include a source image path, source_label path (None if not exist)
    the name_txt (optional, will use the filename if not provided) include the fname for each image ( to avoid confusion of source images with the same filename)
    for fluid augmentation (fluid_mode: aug_with_affined_data/aug_with_nonaffined_data) : each line include a source name, N target name
    for fluid augmentation (fluid_mode: aug_with_atlas and aug_with_random_momentum): a source name
    for bspline augmentation : each line include a source image, source_label (None if not exist): a source name
    """
    import argparse
    parser = argparse.ArgumentParser(description='Registration demo for data augmentation')
    parser.add_argument("-t",'--file_txt', required=False, default=None,
                        help='the file path of input txt, exclusive with random_m')
    parser.add_argument("-n", '--name_txt', required=False, default=None,
                        help=' txt recording corresponding file name')
    parser.add_argument("-w",'--rdmm_preweight_txt_path', required=False, default=None,
                        help='the file path of rdmm preweight txt, only needed when use predefined rdmm model,(need to further test)')
    parser.add_argument("-m",'--fluid_mode',required=False,default=None,
                        help='aug_with_affined_data/aug_with_nonaffined_data/aug_with_atlas/aug_with_random_momentum')
    parser.add_argument('--bspline', required=False, action='store_true',
                        help='data augmentation with bspline, exclusive random_m, rdmm_preweight_txt_path,compute_inverse')
    parser.add_argument("-o",'--output_path', required=False, default='./rdmm_synth_data_generation/data_task',
                        help='the path of task folder')
    parser.add_argument("-as",'--aug_setting_path', required=False, default=None,
                        help='path of data augmentation setting json')
    parser.add_argument("-ms",'--mermaid_setting_path', required=False, default=None,
                        help='path of mermaid setting json')
    # NOTE(review): the help text below looks copy-pasted from mermaid_setting_path -- should describe the gpu id
    parser.add_argument("-g", '--gpu_id', required=False, default=0,
                        help='path of mermaid setting json')
    args = parser.parse_args()
    file_txt = args.file_txt
    name_txt = args.name_txt
    rdmm_preweight_txt_path = args.rdmm_preweight_txt_path
    use_init_weight = rdmm_preweight_txt_path is not None
    mermaid_setting_path = args.mermaid_setting_path
    aug_setting_path = args.aug_setting_path
    fluid_mode = args.fluid_mode
    use_bspline = args.bspline
    output_path = args.output_path
    gpu_id = int(args.gpu_id)
    if use_bspline:
        # bspline augmentation is CPU-only; hide all GPUs
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    else:
        torch.cuda.set_device(gpu_id)
    assert os.path.isfile(file_txt),"{} not exists".format(file_txt)
    assert os.path.isfile(aug_setting_path),"{} not exists".format(aug_setting_path)
    if not use_bspline:
        assert os.path.isfile(mermaid_setting_path),"{} not exists".format(mermaid_setting_path)
    if fluid_mode is None and not use_bspline:
        # fall back to the fluid mode stored in the augmentation setting file
        print("the fluid mode is not provided, now read from {}".format(aug_setting_path))
        params = pars.ParameterDict()
        params.load_JSON(aug_setting_path)
        fluid_mode = params["data_aug"]["fluid_aug"]["fluid_mode"]
    os.makedirs(output_path,exist_ok=True)
    # if the use_random_m is false or use_bspline, then the each only include moving and its label(optional)
    moving_momentum_path_list = get_pair_list(file_txt)
    if name_txt is not None:
        fname_list = read_fname_list_from_pair_fname_txt(name_txt,detail=True)
    else:
        fname_list = None
    init_weight_path_list = None
    if use_init_weight:
        init_weight_path_list = get_init_weight_list(rdmm_preweight_txt_path)
    generate_aug_data(moving_momentum_path_list,fname_list,init_weight_path_list,output_path, mermaid_setting_path,fluid_mode, aug_setting_path,fluid_aug= not use_bspline)
| 47,706 | 54.280417 | 235 | py |
easyreg | easyreg-master/demo/demo_for_seg_eval.py | import matplotlib as matplt
matplt.use('Agg')
import os, sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../easy_reg'))
import tools.module_parameters as pars
from abc import ABCMeta, abstractmethod
from easyreg.piplines import run_one_task
from easyreg.reg_data_utils import write_list_into_txt, get_file_name, read_txt_into_list
import torch
# let cudnn auto-tune convolution algorithms (fastest for fixed-size inputs)
torch.backends.cudnn.benchmark=True
class BaseTask(metaclass=ABCMeta):
    """Abstract base for setting-file task wrappers; subclasses must implement save().

    Fix: the class previously set the Python-2-only ``__metaclass__`` attribute,
    which Python 3 ignores, so ``@abstractmethod`` was never enforced; declaring
    ``metaclass=ABCMeta`` restores the intended abstract behavior.
    """
    def __init__(self, name):
        # human-readable task name
        self.name = name

    @abstractmethod
    def save(self):
        """Persist the task settings; implemented by subclasses."""
        pass
class DataTask(BaseTask):
    """Task wrapper around a data-preprocessing setting file (.json)."""

    def __init__(self, name, path='../settings/base_data_settings.json'):
        """Load the data settings at ``path`` into a ParameterDict."""
        super().__init__(name)
        self.data_par = pars.ParameterDict()
        self.data_par.load_JSON(path)

    def save(self, path='../settings/data_settings.json'):
        """Write the (possibly modified) data settings out to ``path``."""
        self.data_par.write_ext_JSON(path)
class ModelTask(BaseTask):
    """Task wrapper around a task/model setting file (.json)."""

    def __init__(self, name, path='../settings/base_task_settings.json'):
        """Load the task settings at ``path`` into a ParameterDict."""
        super().__init__(name)
        self.task_par = pars.ParameterDict()
        self.task_par.load_JSON(path)

    def save(self, path='../settings/task_settings.json'):
        """Write the (possibly modified) task settings out to ``path``."""
        self.task_par.write_ext_JSON(path)
def force_test_setting(dm, tsm, output_path):
    """
    Force the settings into test mode and persist them.

    The updated settings are written to output_path/cur_data_setting.json and
    output_path/cur_task_setting.json.

    :param dm: ParameterDict wrapper for data preprocessing, or None if those
        settings already live inside tsm
    :param tsm: ParameterDict wrapper for the task settings
    :param output_path: folder receiving the updated json files
    :return: None
    """
    if dm is None:
        # no separate data settings: the loading cap lives in the task settings
        tsm.task_par['dataset']['max_num_for_loading'] = [1, 1, -1, 1]
    else:
        dm.data_par['datapro']['dataset']['prepare_data'] = False
        dm.data_par['datapro']['reg']['max_num_for_loading'] = [1, 1, -1, 1]
        dm.save(os.path.join(output_path, 'cur_data_setting.json'))
    # disable any training behavior for the evaluation run
    tsm.task_par['tsk_set']['train'] = False
    tsm.task_par['tsk_set']['continue_train'] = False
    tsm.save(os.path.join(output_path, 'cur_task_setting.json'))
def init_test_env(setting_path, output_path, file_list, fname_list):
    """
    Create the test environment: the file list is saved into
    output_path/seg/test/file_path_list.txt and a matching (possibly
    auto-parsed) filename list into output_path/seg/test/file_name_list.txt.

    :param setting_path: folder holding 'cur_task_setting.json' and optionally 'cur_data_setting.json'
    :param output_path: output folder of the task
    :param file_list: items are either [image_path, label_path] pairs or bare image paths
    :param fname_list: optional list of file names; derived from the paths if None
    :return: tuple (dm, tsm) of ParameterDict wrappers; dm may be None
    """
    dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')
    assert os.path.isfile(tsm_json_path), "task setting not exists"
    dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg', tsm_json_path)
    test_dir = os.path.join(output_path, 'seg/test')
    os.makedirs(test_dir, exist_ok=True)
    os.makedirs(os.path.join(output_path, 'seg/res'), exist_ok=True)
    if fname_list is None:
        # a length-2 item means (image, label); otherwise items are bare image paths
        if len(file_list[0]) == 2:
            fname_list = [get_file_name(item[0]) for item in file_list]
        else:
            fname_list = [get_file_name(item) for item in file_list]
    write_list_into_txt(os.path.join(test_dir, 'file_path_list.txt'), file_list)
    write_list_into_txt(os.path.join(test_dir, 'file_name_list.txt'), fname_list)
    data_task_name = 'seg'
    cur_task_name = 'res'
    if dm is not None:
        dm.data_par['datapro']['dataset']['output_path'] = output_path
        dm.data_par['datapro']['dataset']['task_name'] = data_task_name
    tsm.task_par['tsk_set']['task_name'] = cur_task_name
    tsm.task_par['tsk_set']['output_root_path'] = os.path.join(output_path, data_task_name)
    return dm, tsm
def do_segmentation_eval(args, segmentation_file_list):
    """
    set running env and run the task
    :param args: the parsed arguments
    :param segmentation_file_list: list of segmentation file list, [image_list, label_list]
    :return: None
    """
    task_output_path = args.task_output_path
    os.makedirs(task_output_path, exist_ok=True)
    setting_folder_path = args.setting_folder_path
    file_txt_path = ''
    if args.file_txt_path:
        file_txt_path = args.file_txt_path
        # a file_name_list.txt next to the path txt (if any) supplies the names
        fname_txt_path = os.path.join(os.path.split(file_txt_path)[0],"file_name_list.txt")
        fname_list = read_txt_into_list(fname_txt_path) if os.path.isfile(fname_txt_path) else None
    else:
        print(segmentation_file_list)
        # derive names from the image paths; duplicated to mirror the pair layout
        fname_list = [[f.split('/')[-1].split('.')[0] for f in segmentation_file_list[0]]]*2
    dm, tsm = init_test_env(setting_folder_path, task_output_path, segmentation_file_list, fname_list)
    tsm.task_par['tsk_set']['gpu_ids'] = args.gpu_id
    model_path= args.model_path
    if model_path is not None:
        # fix: this used str.format_map (which requires a mapping and cannot fill a
        # positional "{}", so the assert message itself raised) instead of str.format
        assert os.path.isfile(model_path), "the model {} not exist".format(model_path)
        tsm.task_par['tsk_set']['model_path'] = model_path
    force_test_setting(dm, tsm, task_output_path)
    dm_json_path = os.path.join(task_output_path, 'cur_data_setting.json') if dm is not None else None
    tsm_json_path = os.path.join(task_output_path, 'cur_task_setting.json')
    run_one_task(tsm_json_path, dm_json_path)
if __name__ == '__main__':
    """
    A evaluation interface for segmentation network with pre-trained models.
    Arguments:
        input related:two input styles are supported,
        1. given txt
        --file_txt_path/-txt: the txt file recording the paths of images to segmentation
        2. given image
        --image_list/ -i: the image list, s1 s2 s3..sn
        --limage_list/ -li: optional, the label list, ls1,ls2,ls3..lsn
        other arguments:
        --setting_folder_path/-ts :path of the folder where settings are saved
        --task_output_path/ -o: the path of output folder
        --model_path/ -m: the path of pretrained model, can be set here or set in setting file
        --gpu_id/ -g: gpu_id to use
    """
    import argparse
    parser = argparse.ArgumentParser(description='An easy interface for evaluate various segmentation methods')
    parser.add_argument('-ts', '--setting_folder_path', required=False, type=str,
                        default=None,
                        help='path of the folder where settings are saved,should include cur_task_setting.json')
    parser.add_argument('-txt', '--file_txt_path', required=False, default=None, type=str,
                        help='the txt file recording the paths of images for segmentation')  # 2
    parser.add_argument('-i', '--image_list', nargs='+', required=False, default=None,
                        help='the image list, s1 s2 s3..sn')
    parser.add_argument('-li', '--limage_list', nargs='+', required=False, default=None,
                        help='the image label list, ls1,ls2,ls3..lsn')
    parser.add_argument('-o', "--task_output_path", required=True, default=None, help='the output path')
    parser.add_argument('-m', "--model_path", required=False, default=None, help='the path of trained model')
    parser.add_argument('-g', "--gpu_id", required=False, type=int, default=0, help='gpu_id to use')
    args = parser.parse_args()
    print(args)
    file_txt_path = args.file_txt_path
    image_list = args.image_list
    limage_list = args.limage_list
    image_label_list = []
    # exactly one of the two input styles must be used
    assert file_txt_path is not None or image_list is not None, "either file_txt_path or source/target_list should be provided"
    assert file_txt_path is None or image_list is None, " file_txt_path and source/target_list cannot be both provided"
    if file_txt_path is not None:
        image_label_list = read_txt_into_list(file_txt_path)
    if limage_list is not None:
        assert len(image_list) == len(limage_list), "the image_list and limage_list should be the same length"
        # NOTE(review): only the FIRST image/label pair is written into the txt;
        # any further entries of image_list are silently dropped -- confirm intended
        with open('file_path_list.txt', 'w+') as f:
            f.write('{}\t{}'.format(image_list[0], limage_list[0]))
        args.file_txt_path = 'file_path_list.txt'
        image_label_list = read_txt_into_list('file_path_list.txt')
        args.image_list = None
        args.limage_list = None
    do_segmentation_eval(args, image_label_list)
| 9,097 | 41.915094 | 152 | py |
easyreg | easyreg-master/easyreg/seg_net.py | from .base_seg_model import SegModelBase
from .net_utils import print_network
from .losses import Loss
import torch.optim.lr_scheduler as lr_scheduler
from .utils import *
from .seg_unet import SegUnet
from .metrics import get_multi_metric
# registry of available segmentation backbones, keyed by tsk_set.method_name
model_pool = {
    'seg_unet': SegUnet,
}
class SegNet(SegModelBase):
"""segmentation network class"""
def name(self):
return 'seg-net'
def initialize(self, opt):
"""
initialize variable settings of RegNet
:param opt: ParameterDict, task settings
:return:
"""
SegModelBase.initialize(self, opt)
method_name = opt['tsk_set']['method_name']
self.network = model_pool[method_name](opt)
"""create network model"""
self.criticUpdates = opt['tsk_set']['criticUpdates']
loss_fn = Loss(opt)
self.network.set_loss_fn(loss_fn)
self.opt_optim = opt['tsk_set']['optim']
"""settings for the optimizer"""
self.init_optimize_instance(warmming_up=True)
"""initialize the optimizer and scheduler"""
self.step_count = 0.
""" count of the step"""
print('---------- Networks initialized -------------')
print_network(self.network)
print('-----------------------------------------------')
def init_optimize_instance(self, warmming_up=False):
""" get optimizer and scheduler instance"""
self.optimizer, self.lr_scheduler, self.exp_lr_scheduler = self.init_optim(self.opt_optim, self.network,
warmming_up=warmming_up)
def update_learning_rate(self, new_lr=-1):
"""
set new learning rate
:param new_lr: new learning rate
:return:
"""
if new_lr < 0:
lr = self.opt_optim['lr']
else:
lr = new_lr
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
print(" the learning rate now is set to {}".format(lr))
def set_input(self, data, is_train=True):
"""
:param data:
:param is_train:
:return:
"""
img_and_label, self.fname_list = data
self.img_path = data[0]['img_path']
if self.gpu_ids is not None and self.gpu_ids>=0:
img_and_label['image'] = img_and_label['image'].cuda()
if 'label' in img_and_label:
img_and_label['label'] = img_and_label['label'].cuda()
input, gt = get_seg_pair(img_and_label, is_train)
self.input = input
self.input_img_sz = data[0]['image_after_resize']
self.gt = gt
self.spacing = data[0]['original_spacing']
def init_optim(self, opt, network, warmming_up=False):
"""
set optimizers and scheduler
:param opt: settings on optimizer
:param network: model with learnable parameters
:param warmming_up: if set as warmming up
:return: optimizer, custom scheduler, plateau scheduler
"""
optimize_name = opt['optim_type']
if not warmming_up:
lr = opt['lr']
print(" no warming up the learning rate is {}".format(lr))
else:
lr = opt['lr']/10
print(" warming up on the learning rate is {}".format(lr))
beta = opt['adam']['beta']
lr_sched_opt = opt[('lr_scheduler',{},"settings for learning scheduler")]
self.lr_sched_type = lr_sched_opt['type']
if optimize_name == 'adam':
re_optimizer = torch.optim.Adam(network.parameters(), lr=lr, betas=(beta, 0.999))
else:
re_optimizer = torch.optim.SGD(network.parameters(), lr=lr)
re_optimizer.zero_grad()
re_lr_scheduler = None
re_exp_lr_scheduler = None
if self.lr_sched_type == 'custom':
step_size = lr_sched_opt['custom'][('step_size',50,"update the learning rate every # epoch")]
gamma = lr_sched_opt['custom'][('gamma',0.5,"the factor for updateing the learning rate")]
re_lr_scheduler = torch.optim.lr_scheduler.StepLR(re_optimizer, step_size=step_size, gamma=gamma)
elif self.lr_sched_type == 'plateau':
patience = lr_sched_opt['plateau']['patience']
factor = lr_sched_opt['plateau']['factor']
threshold = lr_sched_opt['plateau']['threshold']
min_lr = lr_sched_opt['plateau']['min_lr']
re_exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(re_optimizer, mode='min', patience=patience,
factor=factor, verbose=True,
threshold=threshold, min_lr=min_lr)
return re_optimizer, re_lr_scheduler, re_exp_lr_scheduler
def cal_loss(self, output=None, gt=None):
loss = self.network.get_loss(output, gt)
return loss
def backward_net(self, loss):
loss.backward()
def get_debug_info(self):
""" get filename of the failed cases"""
info = {'file_name': self.fname_list}
return info
def forward(self, input=None):
"""
:param input(not used )
:return: warped image intensity with [-1,1], transformation map defined in [-1,1], affine image if nonparameteric reg else affine parameter
"""
if hasattr(self.network, 'set_cur_epoch'):
self.network.set_cur_epoch(self.cur_epoch)
output = self.network.forward(self.input, self.is_train)
loss = self.cal_loss(output, self.gt)
return output, loss
def update_scheduler(self,epoch):
if self.lr_scheduler is not None:
self.lr_scheduler.step(epoch)
for param_group in self.optimizer.param_groups:
print("the current epoch is {} with learining rate set at {}".format(epoch,param_group['lr']))
def optimize_parameters(self, input=None):
"""
forward and backward the model, optimize parameters and manage the learning rate
:param input: input(not used
:return:
"""
if self.is_train:
self.iter_count += 1
self.output, loss = self.forward()
self.backward_net(loss / self.criticUpdates)
self.loss = loss.item()
update_lr, lr = self.network.check_if_update_lr()
if update_lr:
self.update_learning_rate(lr)
if self.iter_count % self.criticUpdates == 0:
self.optimizer.step()
self.optimizer.zero_grad()
    def get_current_errors(self):
        """Return the loss value recorded by the last ``optimize_parameters`` call."""
        return self.loss
    def save_image_into_original_sz_with_given_reference(self):
        """
        Save the image in its original size and physical coordinates; the path of the
        reference image should be given.

        Not implemented for this model (intentional no-op).
        :return: None
        """
        pass
    def get_evaluation(self):
        """
        Evaluate the network on the current batch and, when ground truth is available,
        compute per-label metrics into ``self.val_res_dic``.
        """
        sz =self.input_img_sz.squeeze().cpu().numpy().tolist()
        # pass file info and image size to the network before inference, if supported
        if hasattr(self.network, 'set_file_path'):
            self.network.set_file_path(self.img_path,self.fname_list)
            self.network.set_img_sz(sz)
        output_np = self.network.forward(self.input,self.is_train)
        if self.gt is not None:
            # rm_bg=False: the background label is included in the metrics
            self.val_res_dic = get_multi_metric(output_np, self.gt, rm_bg=False)
        self.output = output_np
    def get_extra_to_plot(self):
        """
        Return an extra image to be visualized, delegated to the wrapped network.

        :return: image (BxCxXxYxZ), name
        """
        return self.network.get_extra_to_plot()
def set_train(self):
self.network.train(True)
self.is_train = True
torch.set_grad_enabled(True)
def set_val(self):
self.network.train(False)
self.is_train = False
torch.set_grad_enabled(False)
def set_debug(self):
self.network.train(False)
self.is_train = False
torch.set_grad_enabled(False)
def set_test(self):
self.network.train(False)
self.is_train = False
torch.set_grad_enabled(False)
| 8,036 | 33.943478 | 147 | py |
easyreg | easyreg-master/easyreg/affine_net.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .modules import *
from .net_utils import Bilinear
from torch.utils.checkpoint import checkpoint
from .utils import sigmoid_decay
from .losses import NCCLoss
class AffineNetSym(nn.Module):
    """
    A multi-step symmetric-force affine network.
    At each step the network refines the affine parameter; the final transform is the
    composition of all steps. The advantage is that we don't need to warp the image several
    times (it is only interpolated once, by the latest composed affine transform), so
    repeated interpolation does not diffuse the image.
    """
    def __init__(self, img_sz=None, opt=None):
        super(AffineNetSym, self).__init__()
        self.img_sz = img_sz
        """ the image sz in numpy coord"""
        self.dim = len(img_sz)
        """ the dim of image"""
        self.step = opt['tsk_set']['reg']['affine_net'][('affine_net_iter',1,'num of step')]
        """ the num of step"""
        self.step_record = self.step
        """ a copy on step"""
        self.using_complex_net = opt['tsk_set']['reg']['affine_net'][('using_complex_net',True,'use complex version of affine net')]
        """if true, use complex version of affine net"""
        self.acc_multi_step_loss = opt['tsk_set']['reg']['affine_net'][('acc_multi_step_loss',False,'accumulate loss at each step')]
        """accumulate loss from each step"""
        self.initial_reg_factor = opt['tsk_set']['reg']['affine_net'][('initial_reg_factor', 10, 'initial regularization factor')]
        """initial regularization factor"""
        self.min_reg_factor = opt['tsk_set']['reg']['affine_net'][('min_reg_factor', 1e-3, 'minimum regularization factor')]
        """minimum regularization factor"""
        self.epoch_activate_multi_step = opt['tsk_set']['reg']['affine_net'][('epoch_activate_multi_step',-1,'epoch to activate multi-step affine')]
        """epoch to activate multi-step affine"""
        self.reset_lr_for_multi_step = opt['tsk_set']['reg']['affine_net'][('reset_lr_for_multi_step',False,'if True, reset learning rate when multi-step begins')]
        """if True, reset learning rate when multi-step begins"""
        self.lr_for_multi_step = opt['tsk_set']['reg']['affine_net'][('lr_for_multi_step',5e-5,'if reset_lr_for_multi_step, reset learning rate into # when multi-step begins')]
        """if reset_lr_for_multi_step, reset learning rate into # when multi-step begins"""
        self.epoch_activate_sym = opt['tsk_set']['reg']['affine_net'][('epoch_activate_sym',-1,'epoch to activate symmetric forward')]
        """epoch to activate symmetric forward"""
        self.sym_factor = opt['tsk_set']['reg']['affine_net'][('sym_factor', 1., 'the factor of symmetric loss')]
        """ the factor of symmetric loss"""
        self.mask_input_when_compute_loss = opt['tsk_set']['reg']['affine_net'][('mask_input_when_compute_loss', False, 'mask_input_when_compute_loss')]
        """ mask input when compute loss"""
        self.epoch_activate_sym_loss = opt['tsk_set']['reg']['affine_net'][('epoch_activate_sym_loss',-1,'the epoch to take symmetric loss into backward , only if epoch_activate_sym and epoch_activate_sym_loss')]
        """ the epoch to take symmetric loss into backward , only if epoch_activate_sym and epoch_activate_sym_loss"""
        self.epoch_activate_extern_loss = opt['tsk_set']['reg']['affine_net'][('epoch_activate_extern_loss',-1,'epoch to activate the external loss which will replace the default ncc loss')]
        """epoch to activate the external loss which will replace the default ncc loss"""
        # NOTE(review): this field is the size of the fully-connected layer; the attribute
        # string below it looks like a copy-paste leftover from the previous field.
        self.affine_fc_size = opt['tsk_set']['reg']['affine_net'][(
            'affine_fc_size', 720, 'size of the full connected layer, changes depending on input size')]
        """epoch to activate the external loss which will replace the default ncc loss"""
        self.affine_gen = Affine_unet_im(fc_size=self.affine_fc_size) if self.using_complex_net else Affine_unet()
        """ the affine network output the affine parameter"""
        self.affine_param = None
        """ the affine parameter with the shape of Nx 12 for 3d transformation"""
        self.affine_cons= AffineConstrain()
        """ the func return regularization loss on affine parameter"""
        self.id_map= gen_identity_map(self.img_sz).cuda()
        """ the identity map"""
        self.gen_identity_ap()
        """ generate identity affine parameter"""
        ################### init variables #####################
        self.iter_count = 0
        """the num of iteration"""
        self.using_multi_step = True
        """ set multi-step on"""
        self.zero_boundary = True
        """ zero boundary is used for interpolated images"""
        self.epoch = -1
        """ the current epoch"""
        self.ncc = NCCLoss()
        """ normalized cross correlation loss"""
        self.extern_loss = None
        """ external loss used during training"""
        self.compute_loss = True
        """ compute loss, set true during affine network's training"""
    def set_loss_fn(self, loss_fn):
        """ set the external loss function (used once epoch_activate_extern_loss is reached)"""
        self.extern_loss = loss_fn
    def set_cur_epoch(self, cur_epoch):
        """ set current epoch"""
        self.epoch = cur_epoch
    def set_step(self,step):
        """set num of step"""
        self.step = step
        print("the step in affine network is set to {}".format(step))
    def check_if_update_lr(self):
        """
        Check if the learning rate needs to be updated.
        When the epoch reaches epoch_activate_multi_step and reset_lr_for_multi_step is set,
        the learning rate is reset (once) to lr_for_multi_step.

        :return: (True, new_lr) when a reset is due, otherwise (False, None)
        """
        if self.epoch == self.epoch_activate_multi_step and self.reset_lr_for_multi_step:
            lr = self.lr_for_multi_step
            self.reset_lr_for_multi_step = False
            print("the lr is change into {} due to the activation of the multi-step".format(lr))
            return True, lr
        else:
            return False, None
    def gen_affine_map(self,Ab):
        """
        Generate the affine transformation map from the affine parameter.

        :param Ab: affine parameter (N x 12 for 3d)
        :return: affine transformation map (N x dim x X x Y x Z)
        """
        Ab = Ab.view( Ab.shape[0],4,3) # 3d: (batch,4,3)
        id_map = self.id_map.view(self.dim, -1)
        affine_map = None
        if self.dim == 3:
            # linear part (rows 0..2) applied to the identity map, then translation (row 3)
            affine_map = torch.matmul( Ab[:,:3,:], id_map)
            affine_map = Ab[:,3,:].contiguous().view(-1,3,1) + affine_map
            affine_map= affine_map.view([Ab.shape[0]] + list(self.id_map.shape))
        return affine_map
    def update_affine_param(self, cur_af, last_af): # A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2
        """
        Compose the current affine parameter A2 with the last affine parameter A1:
        A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2, giving the composed parameter A3=(A2A1, A2*b1+b2).

        :param cur_af: current affine parameter
        :param last_af: last affine parameter
        :return: composed affine parameter A3
        """
        cur_af = cur_af.view(cur_af.shape[0], 4, 3)
        last_af = last_af.view(last_af.shape[0],4,3)
        updated_af = torch.zeros_like(cur_af.data).cuda()
        if self.dim==3:
            updated_af[:,:3,:] = torch.matmul(cur_af[:,:3,:],last_af[:,:3,:])
            updated_af[:,3,:] = cur_af[:,3,:] + torch.squeeze(torch.matmul(cur_af[:,:3,:], torch.transpose(last_af[:,3:,:],1,2)),2)
        updated_af = updated_af.contiguous().view(cur_af.shape[0],-1)
        return updated_af
    def get_inverse_affine_param(self, affine_param):
        """
        Invert an affine parameter:
        A2(A1*x+b1) +b2= A2A1*x + A2*b1+b2 = x, so A2 = A1^-1 and b2 = -A2*b1.
        """
        affine_param = affine_param.view(affine_param.shape[0], 4, 3)
        inverse_param = torch.zeros_like(affine_param.data).cuda()
        for n in range(affine_param.shape[0]):
            tm_inv = torch.inverse(affine_param[n, :3, :])
            inverse_param[n, :3, :] = tm_inv
            inverse_param[n, 3, :] = - torch.matmul(tm_inv, affine_param[n, 3, :])
        inverse_param = inverse_param.contiguous().view(affine_param.shape[0], -1)
        return inverse_param
    def __get_inverse_map(self):
        # in symmetric mode only the first n_batch entries are source->target parameters
        sym_on = self.epoch>= self.epoch_activate_sym
        affine_param = self.affine_param[:self.n_batch] if sym_on else self.affine_param
        inverse_affine_param = self.get_inverse_affine_param(affine_param)
        inverse_map = self.gen_affine_map(inverse_affine_param)
        return inverse_map
    def get_inverse_map(self,use_01=False):
        """
        Get the inverse transformation map.

        :param use_01: if true, return the map in [0,1] coord, else in [-1,1] coord
        :return: the inverse map
        """
        inverse_map = self.__get_inverse_map()
        if inverse_map is not None:
            if use_01:
                return (inverse_map+1)/2
            else:
                return inverse_map
        else:
            return None
    def gen_identity_ap(self):
        """
        Generate the identity affine parameter (identity linear part, zero translation).

        :return: None (stored in self.affine_identity)
        """
        self.affine_identity = torch.zeros(12).cuda()
        self.affine_identity[0] = 1.
        self.affine_identity[4] = 1.
        self.affine_identity[8] = 1.
    def compute_symmetric_reg_loss(self,affine_param, bias_factor=1.):
        """
        Compute the symmetry loss.
        With s-t transform (a,b) and t-s transform (c,d), the t-s-t round trip is
        a(cy+d)+b = acy + ad+b = y, so ac = I and ad+b = 0.
        An l2 loss constrains the two terms:
        ||ac-I||_2^2 + bias_factor *||ad+b||_2^2

        :param bias_factor: the factor on the translation term
        :return: the symmetry loss (average on batch)
        """
        ap_st, ap_ts = affine_param
        ap_st = ap_st.view(-1, 4, 3)
        ap_ts = ap_ts.view(-1, 4, 3)
        ac = None
        ad_b = None
        ######### check if ad_b is right #####
        if self.dim == 3:
            ac = torch.matmul(ap_st[:, :3, :], ap_ts[:, :3, :])
            ad_b = ap_st[:, 3, :] + torch.squeeze(
                torch.matmul(ap_st[:, :3, :], torch.transpose(ap_ts[:, 3:, :], 1, 2)), 2)
        identity_matrix = self.affine_identity.view(4,3)[:3,:3]
        linear_transfer_part = torch.sum((ac-identity_matrix)**2)
        translation_part = bias_factor * (torch.sum(ad_b**2))
        sym_reg_loss = linear_transfer_part + translation_part
        if self.iter_count %10 ==0:
            print("linear_transfer_part:{}, translation_part:{}, bias_factor:{}".format(linear_transfer_part.cpu().data.numpy(), translation_part.cpu().data.numpy(),bias_factor))
        return sym_reg_loss/ap_st.shape[0]
    def sim_loss(self,loss_fn,warped,target):
        """
        Compute the similarity loss (NCC until epoch_activate_extern_loss, then loss_fn).

        :param loss_fn: the loss function
        :param warped: the warped image
        :param target: the target image
        :return: the similarity loss averaged on batch
        """
        loss_fn = self.ncc if self.epoch < self.epoch_activate_extern_loss else loss_fn
        sim_loss = loss_fn(warped,target)
        return sim_loss / warped.shape[0]
    def scale_sym_reg_loss(self,affine_param, sched='l2'):
        """
        In symmetric forward, compute the regularization loss of both affine parameters.
        l2: l2 loss between the affine parameter and the identity parameter
        det: determinant of the affine parameter, which prefers rigid transformations

        :param sched: 'l2' , 'det'
        :return: the regularization loss on batch
        """
        loss = self.scale_multi_step_reg_loss(affine_param[0],sched) + self.scale_multi_step_reg_loss(affine_param[1],sched)
        return loss
    def scale_multi_step_reg_loss(self,affine_param, sched='l2'):
        """
        Compute the regularization loss of the affine parameters.
        l2: l2 loss between the affine parameter and the identity parameter
        det: determinant of the affine parameter, which prefers rigid transformations

        :param sched: 'l2' , 'det'
        :return: the regularization loss on batch
        """
        weight_mask = torch.ones(4,3).cuda()
        bias_factor = 1.0
        weight_mask[3,:]=bias_factor
        weight_mask = weight_mask.view(-1)
        if sched == 'l2':
            return torch.sum((self.affine_identity - affine_param) ** 2 *weight_mask)\
                   / (affine_param.shape[0])
        elif sched == 'det':
            mean_det = 0.
            for i in range(affine_param.shape[0]):
                affine_matrix = affine_param[i, :9].contiguous().view(3, 3)
                mean_det += torch.det(affine_matrix)
            return mean_det / affine_param.shape[0]
    def get_factor_reg_scale(self):
        """
        Get the regularization factor according to the training strategy:
        a sigmoid decay over epochs, restarted (with a 100x smaller initial value)
        once multi-step is activated, clamped below by min_reg_factor.

        :return: the current regularization factor
        """
        epoch_for_reg = self.epoch if self.epoch < self.epoch_activate_multi_step else self.epoch - self.epoch_activate_multi_step
        factor_scale = self.initial_reg_factor if self.epoch < self.epoch_activate_multi_step else self.initial_reg_factor/100
        static_epoch = 10 if self.epoch < self.epoch_activate_multi_step else 1
        min_threshold = self.min_reg_factor
        decay_factor = 3
        factor_scale = float(
            max(sigmoid_decay(epoch_for_reg, static=static_epoch, k=decay_factor) * factor_scale, min_threshold))
        return factor_scale
    def compute_overall_loss(self, loss_fn, output, target,affine_map,moving_mask=None,target_mask=None):
        """
        Compute the overall loss for affine training:
        overall loss = multi-step similarity loss + symmetry loss + regularization loss

        :param loss_fn: loss function to compute the similarity
        :param output: warped image
        :param target: target image
        :return: overall loss
        """
        if self.mask_input_when_compute_loss and moving_mask is not None and target_mask is not None:
            affine_mask = Bilinear(self.zero_boundary)(moving_mask, affine_map)
            output = output*affine_mask
            target = target*target_mask
        sim_loss = self.sim_loss(loss_fn.get_loss,output, target)
        sym_on = self.epoch>= self.epoch_activate_sym
        affine_param = (self.affine_param[:self.n_batch], self.affine_param[self.n_batch:]) if sym_on else self.affine_param
        sym_reg_loss = self.compute_symmetric_reg_loss(affine_param,bias_factor=1.) if sym_on else 0.
        scale_reg_loss = self.scale_sym_reg_loss(affine_param, sched = 'l2') if sym_on else self.scale_multi_step_reg_loss(affine_param, sched='l2')
        factor_scale = self.get_factor_reg_scale()
        factor_sym =self.sym_factor if self.epoch>= self.epoch_activate_sym_loss else 0.
        sim_factor = 1.
        loss = sim_factor*sim_loss + factor_sym * sym_reg_loss + factor_scale * scale_reg_loss
        print_out_every_iter = (10* self.step) if self.epoch> self.epoch_activate_multi_step else 10
        if self.iter_count%print_out_every_iter==0:
            if self.epoch >= self.epoch_activate_sym:
                print('sim_loss:{}, factor_sym: {}, sym_reg_loss: {}, factor_scale {}, scale_reg_loss: {}'.format(
                    sim_loss.item(),factor_sym,sym_reg_loss.item(),factor_scale,scale_reg_loss.item())
                )
            else:
                print('sim_loss:{}, factor_scale {}, scale_reg_loss: {}'.format(
                    sim_loss.item(), factor_scale, scale_reg_loss.item())
                )
        return loss
    def get_loss(self):
        """
        :return: the overall loss computed during the last forward
        """
        return self.loss
    def forward(self,moving, target,moving_mask=None, target_mask=None):
        """
        Forward the affine network.

        :param moving: moving image
        :param target: target image
        :return: warped image (intensity[-1,1]), transformation map (coord [-1,1]), affine param
        """
        self.iter_count += 1
        if self.epoch_activate_multi_step>0:
            if self.epoch >= self.epoch_activate_multi_step:
                if self.step_record != self.step:
                    print(" the multi step in affine network activated, multi step num: {}".format(self.step_record))
                self.step = self.step_record
            else:
                self.step = 1
        if self.epoch < self.epoch_activate_sym:
            return self.multi_step_forward(moving, target,moving_mask, target_mask,compute_loss= self.compute_loss)
        else:
            return self.sym_multi_step_forward(moving, target,moving_mask, target_mask)
    def multi_step_forward(self,moving,target, moving_mask=None, target_mask=None, compute_loss=True):
        """
        Multi-step forward; at each step the new affine parameter is composed with the last one,
        and the original moving image is re-interpolated with the composed transform.

        :param moving: the moving image
        :param target: the target image
        :param compute_loss: if true, compute the loss
        :return: warped image (intensity[-1,1]), transformation map (coord [-1,1]), affine param
        """
        output = None
        moving_cp = moving
        affine_param = None
        affine_param_last = None
        affine_map = None
        bilinear = [Bilinear(self.zero_boundary) for i in range(self.step)]
        self.loss = 0.
        for i in range(self.step):
            #affine_param = self.affine_gen(moving, target)
            if i == 0:
                affine_param = self.affine_gen(moving, target)
            else:
                # gradient checkpointing saves memory on the later steps
                affine_param = checkpoint(self.affine_gen, moving, target)
            if i > 0:
                affine_param = self.update_affine_param(affine_param, affine_param_last)
            affine_param_last = affine_param
            affine_map = self.gen_affine_map(affine_param)
            # always warp the ORIGINAL moving image with the composed transform
            output = bilinear[i](moving_cp, affine_map)
            moving = output
            self.affine_param = affine_param
            if compute_loss and (i==self.step-1 or self.acc_multi_step_loss):
                self.loss +=self.compute_overall_loss(self.extern_loss,output, target,affine_map,moving_mask,target_mask)
        if compute_loss and self.acc_multi_step_loss:
            self.loss = self.loss / self.step
        return output, affine_map, affine_param
    def sym_multi_step_forward(self, moving, target,moving_mask=None, target_mask=None):
        """
        Symmetric forward:
        the "source" is the concatenation of source and target, the "target" is the
        concatenation of target and source; then the multi-step forward is called.

        :param moving: moving image
        :param target: target image
        :return: warped image, transformation map and affine param of the s->t half only
        """
        self.n_batch = moving.shape[0]
        moving_sym = torch.cat((moving, target), 0)
        target_sym = torch.cat((target, moving), 0)
        moving_mask_sym, target_mask_sym = None, None
        if moving_mask is not None and target_mask is not None:
            moving_mask_sym = torch.cat((moving_mask,target_mask),0)
            target_mask_sym = torch.cat((target_mask,moving_mask),0)
        output, affine_map, affine_param = self.multi_step_forward(moving_sym, target_sym, moving_mask_sym, target_mask_sym)
        return output[:self.n_batch],affine_map[:self.n_batch], affine_param[:self.n_batch]
    def get_extra_to_plot(self):
        """
        No extra image needs to be plotted for this network.

        :return: (None, None)
        """
        return None, None
| 19,504 | 43.329545 | 212 | py |
easyreg | easyreg-master/easyreg/losses.py | # coding=utf-8
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import mermaid.finite_differences as fdt
###############################################################################
# Functions
###############################################################################
class Loss(object):
    """
    Factory/dispatcher for the loss function, selected by ``opt['tsk_set']['loss']['type']``.

    Currently supported:
        "l1": Lasso
        "mse": mean square error
        "ncc": normalized cross correlation
        "lncc": localized normalized cross correlation (multi-kernel)
        "glncc": Gaussian-smoothed localized NCC
        "ce": cross entropy
        "focal_loss", "dice_loss", "gdice_loss", "tdice_loss": segmentation losses
        "empty": no criterion (get_loss returns None)

    :raises ValueError: when the configured loss type is unknown
    """
    def __init__(self, opt):
        super(Loss, self).__init__()
        cont_loss_type = opt['tsk_set']['loss'][('type', "ncc", "loss type")]
        class_num = opt['tsk_set']['seg'][('class_num', -1, "num of classes")]
        if cont_loss_type == 'l1':
            self.criterion = nn.L1Loss()
        elif cont_loss_type == 'mse':
            # reduction='mean' is the default; the deprecated size_average=True spelling
            # triggered warnings in modern PyTorch with identical behavior
            self.criterion = nn.MSELoss()
        elif cont_loss_type == 'ncc':
            self.criterion = NCCLoss()
        elif cont_loss_type == 'lncc':
            lncc = LNCCLoss()
            lncc.initialize()
            self.criterion = lncc
        elif cont_loss_type == 'glncc':
            glncc_opt = opt['tsk_set']['loss']['glncc']
            glncc = GaussianLNCC()
            glncc.initialize(glncc_opt)
            self.criterion = glncc
        elif cont_loss_type == 'empty':
            self.criterion = None
        elif cont_loss_type == 'ce':
            ce_opt = opt['tsk_set']['loss']['ce']
            ce_opt['class_num'] = class_num
            self.criterion = CrossEntropyLoss(ce_opt)
        elif cont_loss_type == 'focal_loss':
            focal_loss = FocalLoss()
            focal_loss.initialize(class_num, alpha=None, gamma=2, size_average=True)
            self.criterion = focal_loss
        elif cont_loss_type == 'dice_loss':
            dice_loss = DiceLoss()
            dice_loss.initialize(class_num, None)
            self.criterion = dice_loss
        elif cont_loss_type == 'gdice_loss':
            dice_loss = GeneralizedDiceLoss()
            dice_loss.initialize(class_num, None)
            self.criterion = dice_loss
        elif cont_loss_type == 'tdice_loss':
            dice_loss = TverskyLoss()
            dice_loss.initialize(class_num, None)
            self.criterion = dice_loss
        else:
            # BUG FIX: ``opt`` is subscripted as a parameter dict everywhere above and has
            # no ``.model`` attribute, so the old message raised AttributeError instead
            raise ValueError("Loss type [%s] not recognized." % cont_loss_type)

    def get_loss(self, output, gt, inst_weights=None, train=False):
        """Evaluate the configured criterion; returns None for the 'empty' loss type."""
        if self.criterion is not None:
            return self.criterion(output, gt)
class NCCLoss(nn.Module):
    """
    Normalized cross correlation (NCC) loss.

    Returns (1 - mean_batch_NCC) * batch_size, so identical inputs give 0 and
    perfectly anti-correlated inputs give ~2 * batch_size.
    """
    def forward(self, input, target, mask=None):
        batch = input.shape[0]
        flat_input = input.view(batch, -1)
        flat_target = target.view(batch, -1)
        flat_mask = mask.view(mask.shape[0], -1) if mask is not None else None
        # center each sample around its own mean
        centered_input = flat_input - torch.mean(flat_input, 1).view(batch, 1)
        centered_target = flat_target - torch.mean(flat_target, 1).view(batch, 1)
        cross = centered_input * centered_target
        if flat_mask is not None:
            # the mask weights the numerator only, matching the unmasked denominator
            cross = cross * flat_mask
        denom = torch.sqrt((centered_input ** 2).mean(1) * (centered_target ** 2).mean(1)) + 1e-7
        ncc_per_sample = cross.mean(1) / denom
        return (1 - ncc_per_sample.mean()) * batch
class LNCCLoss(nn.Module):
    """Multi-scale, multi-kernel localized normalized cross correlation (LNCC).

    The window (kernel) sizes are derived from the current input size: inputs larger
    than 128 use three windows (S/16, S/8, S/4) weighted 0.1/0.3/0.6 with dilation 2;
    inputs larger than 64 use two windows (S/4, S/2) weighted 0.3/0.7; smaller inputs
    use a single S/2 window. Only ONE scale configuration is active per call, selected
    by the input size. The per-window LNCC terms are combined as
    Loss = sum_i w_i * lncc_win_i with sum(w_i) = 1.

    Local sums are computed with strided 3d convolutions against an all-ones kernel;
    the stride is window/4 to keep the computation tractable.
    """
    def initialize(self, kernel_sz=[9, 9, 9], voxel_weights=None):
        # kept for interface compatibility; the kernels are derived per-call from the input size
        pass

    def __stepup(self, img_sz, use_multi_scale=True):
        """Configure scales, weights, strides, dilations and ones-kernels for this input size."""
        max_scale = min(img_sz)
        if use_multi_scale:
            if max_scale > 128:
                self.scale = [int(max_scale / 16), int(max_scale / 8), int(max_scale / 4)]
                self.scale_weight = [0.1, 0.3, 0.6]
                self.dilation = [2, 2, 2]
            elif max_scale > 64:
                self.scale = [int(max_scale / 4), int(max_scale / 2)]
                self.scale_weight = [0.3, 0.7]
                self.dilation = [2, 2]
            else:
                self.scale = [int(max_scale / 2)]
                self.scale_weight = [1.0]
                self.dilation = [1]
        else:
            # BUG FIX: the original assigned the kernel size into self.scale_weight (twice)
            # and never set self.scale / self.dilation, so the single-scale path crashed
            # at len(self.scale) below.
            self.scale = [int(max_scale / 4)]
            self.scale_weight = [1.0]
            self.dilation = [1]
        self.num_scale = len(self.scale)
        self.kernel_sz = [[scale for _ in range(3)] for scale in self.scale]
        # stride of roughly a quarter window, but at least 1
        self.step = [[max(int((ksz + 1) / 4), 1) for ksz in self.kernel_sz[scale_id]] for scale_id in range(self.num_scale)]
        self.filter = [torch.ones([1, 1] + self.kernel_sz[scale_id]).cuda() for scale_id in range(self.num_scale)]
        self.conv = F.conv3d

    def forward(self, input, target):
        """Return the multi-kernel LNCC loss, scaled by the batch size."""
        self.__stepup(img_sz=list(input.shape[2:]))
        input_2 = input ** 2
        target_2 = target ** 2
        input_target = input * target

        def local_sum(x, scale_id):
            # windowed sum via strided convolution with an all-ones kernel,
            # flattened to (batch, n_windows)
            return self.conv(x, self.filter[scale_id], padding=0, dilation=self.dilation[scale_id],
                             stride=self.step[scale_id]).view(input.shape[0], -1).contiguous()

        lncc_total = 0.
        for scale_id in range(self.num_scale):
            input_local_sum = local_sum(input, scale_id)
            target_local_sum = local_sum(target, scale_id)
            input_2_local_sum = local_sum(input_2, scale_id)
            target_2_local_sum = local_sum(target_2, scale_id)
            input_target_local_sum = local_sum(input_target, scale_id)

            numel = float(np.array(self.kernel_sz[scale_id]).prod())
            input_local_mean = input_local_sum / numel
            target_local_mean = target_local_sum / numel

            # expanded forms of covariance and variances over each local window
            cross = input_target_local_sum - target_local_mean * input_local_sum - \
                    input_local_mean * target_local_sum + target_local_mean * input_local_mean * numel
            input_local_var = input_2_local_sum - 2 * input_local_mean * input_local_sum + input_local_mean ** 2 * numel
            target_local_var = target_2_local_sum - 2 * target_local_mean * target_local_sum + target_local_mean ** 2 * numel

            lncc = cross * cross / (input_local_var * target_local_var + 1e-5)
            lncc = 1 - lncc.mean()
            lncc_total += lncc * self.scale_weight[scale_id]

        return lncc_total * (input.shape[0])
from mermaid.smoother_factory import SingleGaussianFourierSmoother
class GaussianLNCC(nn.Module):
    """Localized NCC where the local statistics come from Gaussian-Fourier smoothing.

    Smoothers are cached per input spatial size; ``params`` is forwarded to mermaid's
    SingleGaussianFourierSmoother.
    """
    def initialize(self, params):
        # params: smoother configuration (mermaid parameter dict)
        self.params = params
        self.smoother_buffer = {}
    def get_buffer_smoother(self, sz):
        """Fetch (or lazily build) the smoother for spatial size ``sz`` and store it in self.smoother."""
        sz = tuple(sz)
        if sz not in self.smoother_buffer:
            # spacing for a unit domain with sz grid points
            spacing = 1./(np.array(sz)-1)
            self.smoother_buffer[sz] =SingleGaussianFourierSmoother(sz, spacing, self.params)
        self.smoother = self.smoother_buffer[sz]
    def forward(self, input, target):
        """Return 1 - mean local NCC, computed via smoothed first/second moments."""
        self.get_buffer_smoother(list(input.shape[2:]))
        sm_input = self.smoother.smooth(input)
        sm_inputsq = self.smoother.smooth(input**2)
        sm_target =self.smoother.smooth(target)
        sm_targetsq = self.smoother.smooth(target**2)
        sm_inputtarget = self.smoother.smooth(input*target)
        # reference (direct) form of the same quantity:
        #lncc = ((sm_inputtarget - sm_input*sm_target)**2)/((sm_inputsq-sm_input**2)*(sm_targetsq-sm_target**2))
        # NOTE(review): computed in log space; torch.log of a non-positive local covariance
        # or variance would produce NaN — presumably inputs are assumed positively
        # correlated locally. TODO confirm.
        lncc = torch.exp(torch.log(sm_inputtarget - sm_input*sm_target) - 0.5*torch.log(sm_inputsq-sm_input**2)-0.5*torch.log(sm_targetsq-sm_target**2))
        lncc = 1- lncc.mean()
        return lncc
class FocalLoss(nn.Module):
    """
    This criterion is an implementation of Focal Loss, which is proposed in
    Focal Loss for Dense Object Detection:
        Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
    The losses are averaged across observations for each minibatch.

    Args:
        alpha(1D Tensor, Variable) : the scalar factor for this criterion
        gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
                               putting more focus on hard, misclassified examples
        size_average(bool): By default, the losses are averaged over observations for each minibatch.
                            However, if the field size_average is set to False, the losses are
                            instead summed for each minibatch.
    """
    def initialize(self, class_num, alpha=None, gamma=2, size_average=True, verbose=True):
        """Set the per-class weights ``alpha`` (defaults to all ones), ``gamma`` and reduction mode."""
        if alpha is None:
            self.alpha = torch.ones(class_num, 1)
        else:
            self.alpha = alpha
        self.alpha = torch.squeeze(self.alpha)
        if verbose:
            print("the alpha of focal loss is {}".format(alpha))
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average

    def forward(self, inputs, targets, weight=None, inst_weights=None, train=None):
        """
        :param inputs: Bxn_classxXxYxZ raw logits
        :param targets: Bx..... integer labels in range(0, n_class)
        :return: scalar focal loss (mean or sum over voxels, per size_average)
        """
        # flatten to (n_voxels, n_class); inputs.size(1) is evaluated on the original
        # tensor, i.e. the channel count, which is the last dim after the permute
        inputs = inputs.permute(0, 2, 3, 4, 1).contiguous().view(-1, inputs.size(1))
        targets = targets.view(-1)
        P = F.softmax(inputs, dim=1)
        ids = targets.view(-1)
        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.cuda()
        alpha = self.alpha[ids.data.view(-1)]
        # reduction='none' replaces the deprecated reduce=False with identical behavior
        log_p = - F.cross_entropy(inputs, targets, reduction='none')
        # NOTE(review): nll_loss applied to probabilities yields -P[target], so
        # (1 - probs) below equals (1 + p), not the paper's (1 - p) — kept as-is to
        # preserve the trained behavior; confirm against the original intent.
        probs = F.nll_loss(P, targets, reduction='none')
        batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss
class DiceLoss(nn.Module):
    """Soft (multi-class) Dice loss; returns the negated, class-weighted mean Dice."""
    def initialize(self, class_num, weight=None):
        """Set the number of classes and per-class weights (default: uniform 1/class_num)."""
        self.class_num = class_num
        if weight is None:
            self.weight = torch.ones(class_num, 1) / self.class_num
        else:
            self.weight = weight
        self.weight = torch.squeeze(self.weight)

    def forward(self, input, target, inst_weights=None, train=None):
        """
        :param input: BxCxHxWxD raw logits (softmax is applied internally)
        :param target: Bx... integer labels in range(0, n_label)
        :return: scalar loss, -sum_c w_c * dice_c averaged over the batch
        """
        in_sz = input.size()
        from functools import reduce
        extra_dim = reduce(lambda x, y: x * y, in_sz[2:])
        # build a one-hot target on the same device as the input (works on CPU and GPU)
        targ_one_hot = torch.zeros(in_sz[0], in_sz[1], extra_dim, device=input.device)
        targ_one_hot.scatter_(1, target.view(in_sz[0], 1, extra_dim), 1.)
        target = targ_one_hot.view(in_sz).contiguous()
        probs = F.softmax(input, dim=1)
        # per-class intersection
        num = probs * target
        num = num.view(num.shape[0], num.shape[1], -1)
        num = torch.sum(num, dim=2)
        # per-class predicted mass
        den1 = probs
        den1 = den1.view(den1.shape[0], den1.shape[1], -1)
        den1 = torch.sum(den1, dim=2)
        # per-class target mass
        # BUG FIX: the original summed den1 (probs) again instead of the target
        den2 = target
        den2 = den2.view(den2.shape[0], den2.shape[1], -1)
        den2 = torch.sum(den2, dim=2)
        dice = 2 * (num / (den1 + den2))
        dice = self.weight.expand_as(dice) * dice
        dice_eso = dice
        # dice_eso = dice[:, 1:]  # variant ignoring the background class
        dice_total = -1 * torch.sum(dice_eso) / dice_eso.size(0)  # divide by batch_sz
        return dice_total
class GeneralizedDiceLoss(nn.Module):
    """Generalized Dice loss: one Dice score per sample, with class-weighted sums."""
    def initialize(self, class_num, weight=None):
        """Set the number of classes and per-class weights (default: all ones)."""
        self.class_num = class_num
        if weight is None:
            self.weight = torch.ones(class_num, 1)
        else:
            self.weight = weight
        self.weight = torch.squeeze(self.weight)

    def forward(self, input, target, inst_weights=None, train=None):
        """
        :param input: BxCxHxWxD raw logits (softmax is applied internally)
        :param target: Bx... integer labels in range(0, n_label)
        :return: scalar loss, -mean_b 2*sum_c(w_c*num)/sum_c(w_c*(den1+den2))
        """
        in_sz = input.size()
        from functools import reduce
        extra_dim = reduce(lambda x, y: x * y, in_sz[2:])
        # build a one-hot target on the same device as the input (works on CPU and GPU)
        targ_one_hot = torch.zeros(in_sz[0], in_sz[1], extra_dim, device=input.device)
        targ_one_hot.scatter_(1, target.view(in_sz[0], 1, extra_dim), 1.)
        target = targ_one_hot.view(in_sz).contiguous()
        probs = F.softmax(input, dim=1)
        num = probs * target
        num = num.view(num.shape[0], num.shape[1], -1)
        num = torch.sum(num, dim=2)  # batch x ch, per-class intersection
        den1 = probs
        den1 = den1.view(den1.shape[0], den1.shape[1], -1)
        den1 = torch.sum(den1, dim=2)  # batch x ch, predicted mass
        # BUG FIX: the original summed den1 (probs) again instead of the target
        den2 = target
        den2 = den2.view(den2.shape[0], den2.shape[1], -1)
        den2 = torch.sum(den2, dim=2)  # batch x ch, target mass
        weights = self.weight.expand_as(den1)
        dice = 2 * (torch.sum(weights * num, dim=1) / torch.sum(weights * (den1 + den2), dim=1))
        dice_eso = dice
        # dice_eso = dice[:, 1:]  # variant ignoring the background class
        dice_total = -1 * torch.sum(dice_eso) / dice_eso.size(0)  # divide by batch_sz
        return dice_total
class TverskyLoss(nn.Module):
    """Tversky loss: Dice generalized with alpha/beta penalties on FP/FN masses."""
    def initialize(self, class_num, weight=None, alpha=0.5, beta=0.5):
        """Set classes, per-class weights (default uniform 1/class_num), and FP/FN factors."""
        self.class_num = class_num
        if weight is None:
            self.weight = torch.ones(class_num, 1) / self.class_num
        else:
            self.weight = weight
        self.weight = torch.squeeze(self.weight)
        self.alpha = alpha
        self.beta = beta
        print("the weight of Tversky loss is {}".format(weight))

    def forward(self, input, target, inst_weights=None, train=None):
        """
        :param input: BxCxHxWxD raw logits (softmax is applied internally)
        :param target: Bx... integer labels in range(0, n_label)
        :return: scalar loss, -sum_c w_c * 2*TP/(TP + alpha*FP + beta*FN) averaged over batch
        """
        in_sz = input.size()
        from functools import reduce
        extra_dim = reduce(lambda x, y: x * y, in_sz[2:])
        # build a one-hot target on the same device as the input (works on CPU and GPU)
        targ_one_hot = torch.zeros(in_sz[0], in_sz[1], extra_dim, device=input.device)
        targ_one_hot.scatter_(1, target.view(in_sz[0], 1, extra_dim), 1.)
        target = targ_one_hot.view(in_sz).contiguous()
        probs = F.softmax(input, dim=1)
        # true positives
        num = probs * target
        num = num.view(num.shape[0], num.shape[1], -1)
        num = torch.sum(num, dim=2)
        # false positives (predicted mass outside the target)
        den1 = probs * (1 - target)
        den1 = den1.view(den1.shape[0], den1.shape[1], -1)
        den1 = torch.sum(den1, dim=2)
        # false negatives (target mass missed by the prediction)
        # BUG FIX: the original summed den1 again instead of den2
        den2 = (1 - probs) * target
        den2 = den2.view(den2.shape[0], den2.shape[1], -1)
        den2 = torch.sum(den2, dim=2)
        dice = 2 * (num / (num + self.alpha * den1 + self.beta * den2))
        dice = self.weight.expand_as(dice) * dice
        dice_eso = dice
        # dice_eso = dice[:, 1:]  # variant ignoring the background class
        dice_total = -1 * torch.sum(dice_eso) / dice_eso.size(0)  # divide by batch_sz
        return dice_total
class CrossEntropyLoss(nn.Module):
    """Cross-entropy segmentation loss configured from an option dict, with
    optional per-class weighting, background exclusion and label masking.

    NOTE(review): the option object is indexed with tuples
    ``opt[('key', default, doc)]`` — presumably this project's
    self-documenting parameter dict; confirm its semantics there.
    """
    def __init__(self, opt, imd_weight=None):
        # To Do, add dynamic weight
        super(CrossEntropyLoss,self).__init__()
        no_bg = opt[('no_bg',False,'exclude background')]
        weighted = opt[('weighted',False,' weighted the class')]
        reduced = opt[('reduced',True,' reduced the class')]
        self.mask = None #opt[('mask',None, 'masked other label')]
        class_num = opt['class_num']
        if no_bg:
            # NOTE(review): this loss_fn is unconditionally overwritten by the
            # weighted if/else below, so `no_bg` currently has no effect —
            # looks like a bug; also -100 is the default ignore_index, not the
            # background label 0.
            self.loss_fn = nn.CrossEntropyLoss(ignore_index=-100)
        if weighted:
            # an immediate weight passed at construction overrides the
            # configured class weight
            class_weight = opt['class_weight']if imd_weight is None else imd_weight
            if class_weight is not None and not (len(class_weight)< class_num):
                # a full per-class weight vector was provided
                # NOTE(review): `reduce` is a deprecated nn.CrossEntropyLoss
                # argument (replaced by `reduction`); kept as-is here.
                self.loss_fn = nn.CrossEntropyLoss(weight=class_weight, reduce = reduced)
                self.mask=None
            else: # this is the case for using random mask, the class weight here refers to the label need be masked
                self.mask = class_weight
                print("the current mask is {}".format(self.mask))
                self.loss_fn = nn.CrossEntropyLoss()
        else:
            self.loss_fn = nn.CrossEntropyLoss(reduce = reduced)
        self.n_class = class_num
    def forward(self, input, gt, inst_weights= None, train=False):
        """
        :param inputs: Bxn_classxXxYxZ
        :param targets: Bx..... , range(0,n_class)
        :return:
        """
        if self.mask is not None and train:
            # NOTE(review): remaps masked labels to 0 (background) in-place,
            # mutating the caller's ground-truth tensor — confirm intended.
            for m in self.mask:
                gt[gt==m]=0
        if len(input.shape)==5:
            # flatten B x C x X x Y x Z predictions to (B*X*Y*Z) x C
            output_flat = input.permute(0, 2, 3, 4, 1).contiguous().view(-1, self.n_class)
        else:
            output_flat = input
        truths_flat = gt.view(-1)
        if inst_weights is None:
            return self.loss_fn(output_flat,truths_flat)
        else:
            # per-voxel weighting: assumes loss_fn was built unreduced
            # (reduce=False), so it returns one value per flattened voxel
            return torch.mean( inst_weights.view(-1)*self.loss_fn(output_flat,truths_flat))
| 21,306 | 38.826168 | 152 | py |
easyreg | easyreg-master/easyreg/seg_unet.py | from .modules import Seg_resid
from .utils import *
import torch.nn as nn
from data_pre.partition import partition
class SegUnet(nn.Module):
    """Patch-based 3D segmentation network (``Seg_resid`` backbone).

    Training runs the backbone on patches directly; at test time patch
    predictions are reassembled into a full volume via ``self.partition``,
    optionally ensembled over pre-computed augmentations on disk.

    NOTE(review): the option object is indexed with tuples
    ``opt[(key, default, doc)]`` — this project's self-documenting
    parameter dict.
    """
    def __init__(self, opt=None):
        super(SegUnet, self).__init__()
        self.opt = opt
        seg_opt = opt['tsk_set'][('seg',{},"settings for seg task")]
        self.is_train = opt['tsk_set']["train"]
        self.num_class = seg_opt['class_num',-1,"the num of class"]
        use_bn = seg_opt["use_bn", True, "use the batch normalization"]
        patch_sz = opt['dataset']['seg']['patch_size',[-1,-1,-1],"the size of input patch"]
        overlap_sz = opt['dataset']['seg']['partition']['overlap_size',[-1,-1,-1],"the size of input patch"]
        # flip from numpy (z,y,x) order into itk (x,y,z) order
        patch_sz_itk = list(np.flipud(np.array(patch_sz)))
        overlap_sz_itk = list(np.flipud(np.array(overlap_sz)))
        self.img_sz = None
        self.unet = Seg_resid(self.num_class,bn=use_bn)
        self.print_count = 0
        # splits a full volume into overlapping patches / assembles them back
        self.partition = partition(opt['dataset']['seg']['partition'],patch_sz_itk,overlap_sz_itk)
        self.ensemble_during_the_test = opt['tsk_set']['seg'][("ensemble_during_the_test",False,"do test phase ensemble, which needs the test phase data augmentation already done")]
    def set_loss_fn(self, loss_fn):
        """ set loss function"""
        self.loss_fn = loss_fn
    def get_loss(self, output, gt):
        """compute the loss between network output and ground truth"""
        loss = self.loss_fn.get_loss(output,gt)
        return loss
    def check_if_update_lr(self):
        """this model never asks the trainer for a learning-rate change"""
        return False, None
    def set_img_sz(self, img_sz):
        """record the full-volume size used when assembling patch predictions"""
        self.img_sz = img_sz
    def forward(self, input, is_train=True):
        """run the backbone on patches (train) or build a full-volume
        prediction (eval), ensembled if configured"""
        if is_train:
            output = self.unet(input)
        else:
            with torch.no_grad():
                if not self.is_train and self.ensemble_during_the_test:
                    output = self.get_assemble_ensemble(input)
                else:
                    output = self.get_assemble_pred(input)
        self.print_count += 1
        return output
    def get_assemble_pred(self, input, split_size=6):
        """predict patches in mini-batches of ``split_size``, argmax to hard
        labels, and assemble the full-volume label map"""
        output = []
        input_split = torch.split(input, split_size)
        for input_sub in input_split:
            res = self.forward(input_sub)
            if isinstance(res, list):
                res = res[-1]
            output.append(res.detach().cpu())
        pred_patched = torch.cat(output, dim=0)
        pred_patched = torch.max(pred_patched, 1)[1]
        output_np = self.partition.assemble(pred_patched,image_size=self.img_sz)
        return output_np
    def set_file_path(self, file_path, fname):
        """record the current sample's path and name (used by ensembling)"""
        self.file_path =file_path
        self.fname = fname
    def get_assemble_pred_for_ensemble(self, input, split_size=6):
        """like ``get_assemble_pred`` but keeps per-class scores (no argmax,
        no assembly) so they can be averaged across augmentations"""
        output = []
        input_split = torch.split(input, split_size)
        for input_sub in input_split:
            res = self.forward(input_sub)
            if isinstance(res, list):
                res = res[-1]
            output.append(res.detach().cpu())
        pred_patched = torch.cat(output, dim=0)
        return pred_patched
    def get_assemble_ensemble(self, input):
        """test-time ensemble: segment pre-computed warped copies of the test
        image found on disk, warp each softmax prediction back with the
        stored inverse transform, accumulate, and argmax.

        NOTE(review): requires CUDA and the augmentation outputs
        (``warped_pth`` / ``inv_phi_pth``) to exist on disk.
        """
        import os
        from .reg_data_utils import read_txt_into_list, get_file_name
        from tools.image_rescale import save_image_with_given_reference
        import SimpleITK as sitk
        import torch
        import numpy as np
        from glob import glob
        from copy import deepcopy
        from mermaid.utils import compute_warped_image_multiNC
        patch_sz = self.opt['dataset']['seg']['patch_size', [-1, -1, -1], "the size of input patch"]
        overlap_sz = self.opt['dataset']['seg']['partition']['overlap_size', [-1, -1, -1], "the size of input patch"]
        option_p = self.opt['dataset']['seg'][('partition', {}, "settings for the partition")]
        patch_sz_itk = list(np.flipud(np.array(patch_sz)))
        overlap_sz_itk = list(np.flipud(np.array(overlap_sz)))
        corr_partition_pool = deepcopy(partition(option_p, patch_sz_itk, overlap_sz_itk))
        def compute_warped_image_label(input, warped_pth, warped_type,inv_phi_pth,inv_switcher,num_max=50,weight_for_orig_img=0,nomralize_type="same_as_refer"):
            # collect warped images and their matching inverse maps for the
            # current test file (matched by filename prefix)
            warped_pth_list = glob(os.path.join(warped_pth, warped_type))
            num_max = min(len(warped_pth_list),num_max)
            inv_phi_pth_list = [pth.replace(warped_pth,inv_phi_pth).replace(*inv_switcher) for pth in warped_pth_list]
            f = lambda pth: sitk.GetArrayFromImage(sitk.ReadImage(pth))
            fname = get_file_name(self.fname[0])
            f_warped = lambda x: get_file_name(x).find(fname+'_') == 0
            warped_sub_list = list(filter(f_warped, warped_pth_list))
            inv_phi_sub_list = list(filter(f_warped, inv_phi_pth_list))
            warped_sub_list = warped_sub_list[:num_max]
            inv_phi_sub_list = inv_phi_sub_list[:num_max]
            num_aug = len(warped_sub_list)
            warped_list = [f(pth) for pth in warped_sub_list]
            inv_phi_list = [f(pth) for pth in inv_phi_sub_list]
            warped_img = np.stack(warped_list, 0)[:,None]
            #warped_img = torch.Tensor(warped_img)*2-1.
            warped_img = self.normalize_input(warped_img,nomralize_type,None)#self.file_path[0][0])
            warped_img = torch.Tensor(warped_img)
            inv_phi = np.stack(inv_phi_list, 0)
            # itk-saved displacement field -> N x 3 x X x Y x Z layout
            inv_phi = np.transpose(inv_phi, (0, 4, 3, 2, 1))
            inv_phi = torch.Tensor(inv_phi)
            img_input_sz = self.opt["dataset"]["img_after_resize"]
            differ_sz = any(np.array(warped_img.shape[2:]) != np.array(img_input_sz))
            sz = np.array(self.img_sz)
            spacing = 1. / (sz - 1)
            output_np = np.zeros([1, self.num_class] + self.img_sz)
            if weight_for_orig_img!=0:
                # seed the accumulator with the (weighted) un-augmented prediction
                tzero_img = self.get_assemble_pred_for_ensemble(input)
                tzero_pred = self.partition.assemble_multi_torch(tzero_img, image_size=self.img_sz)
                output_np = tzero_pred.cpu().numpy() * float(round(weight_for_orig_img*num_aug))
            for i in range(num_aug):
                if differ_sz:
                    # resample augmentation outputs to the working volume size
                    warped_img_cur, _ = resample_image(warped_img[i:i+1].cuda(), [1, 1, 1], [1, 3] + self.img_sz)
                    inv_phi_cur, _ = resample_image(inv_phi[i:i+1].cuda(), [1, 1, 1], [1, 1] + self.img_sz)
                    warped_img_cur = warped_img_cur.detach().cpu()
                    inv_phi_cur = inv_phi_cur.detach().cpu()
                else:
                    warped_img_cur = warped_img[i:i+1]
                    inv_phi_cur = inv_phi[i:i+1]
                sample = {"image":[warped_img_cur[0,0].numpy()]}
                sample_p =corr_partition_pool(sample)
                pred_patched = self.get_assemble_pred_for_ensemble(torch.Tensor(sample_p["image"]).cuda())
                pred_patched = self.partition.assemble_multi_torch(pred_patched, image_size=self.img_sz)
                pred_patched = torch.nn.functional.softmax(pred_patched,1)
                # warp the softmax volume back into the original image space
                pred_patched = compute_warped_image_multiNC(pred_patched.cuda(), inv_phi_cur.cuda(),spacing, spline_order=1, zero_boundary=True)
                output_np += pred_patched.cpu().numpy()
            res = torch.max(torch.Tensor(output_np), 1)[1]
            return res[None]
        seg_ensemble_opt = self.opt['tsk_set']['seg'][("seg_ensemble",{},"settings of test phase data ensemble")]
        warped_pth = seg_ensemble_opt[("warped_pth", None,"the folder path containing the warped image from the original image")]
        warped_type = seg_ensemble_opt[("warped_type","*_warped.nii.gz","the suffix of the augmented data")]
        inv_phi_pth = seg_ensemble_opt[("inv_phi_pth",None,"the folder path containing the inverse transformation")]
        inv_switcher = seg_ensemble_opt[("inv_switcher",["_warped.nii.gz","_inv_phi.nii.gz"],"the fname switcher from warped image to inverse transformation map")]
        num_max = seg_ensemble_opt[("num_max",20,"max num of augmentation for per test image")]
        weight_for_orig_img = seg_ensemble_opt[("weight_for_orig_img",0.0,"the weight of original image")]
        nomralize_type = seg_ensemble_opt[("nomralize_type","same_as_refer","'same_as_refer'/'same_as_loader")]
        output_np = compute_warped_image_label(input, warped_pth, warped_type,inv_phi_pth,inv_switcher,num_max=num_max,weight_for_orig_img=weight_for_orig_img, nomralize_type=nomralize_type)
        return output_np
    def normalize_input(self,img,nomralize_type,refer_img_path):
        """dispatch to the configured test-phase normalization
        (parameter name keeps the original 'nomralize' typo for compatibility)"""
        if nomralize_type == "same_as_refer":
            return self.normalize_from_reference(img,refer_img_path)
        # note: the else below pairs with this second if only; the first
        # branch has already returned
        if nomralize_type == "same_as_loader":
            return self.normalize_as_dataloader(img)
        else:
            raise ValueError(
                "the testing phase augmentation normalize type should be either 'same_as_refer'/'same_as_loader'")
    def normalize_from_reference(self,img,refer_img_path):
        """linearly map intensities into [-1, 1] using the min/max of a
        reference image (or of img itself when no reference is given)"""
        import SimpleITK as sitk
        if refer_img_path is not None:
            refer_img = sitk.GetArrayFromImage(sitk.ReadImage(refer_img_path))
        else:
            refer_img = img
        min_intensity = refer_img.min()
        max_intensity = refer_img.max()
        normalized_img = (img - refer_img.min()) / (max_intensity - min_intensity)
        normalized_img = normalized_img * 2 - 1
        return normalized_img
    def normalize_as_dataloader(self, img):
        """
        a numpy image, normalize into intensity [-1,1]
        (img-img.min())/(img.max() - img.min())
        :param img: image
        :param percen_clip: Linearly normalized image intensities so that the 95-th percentile gets mapped to 0.95; 0 stays 0
        :param range_clip: Linearly normalized image intensities from (range_clip[0], range_clip[1]) to 0,1
        :return
        """
        dataset_opt = self.opt["dataset"]
        normalize_via_percentage_clip = dataset_opt[('normalize_via_percentage_clip',-1,"normalize the image via percentage clip, the given value is in [0-1]")]
        normalize_via_range_clip = dataset_opt[
            ('normalize_via_range_clip', (-1, -1), "normalize the image via range clip")]
        if normalize_via_percentage_clip>0:
            img = img - img.min()
            normalized_img = img / np.percentile(img, 95) * 0.95
        else:
            range_clip = normalize_via_range_clip
            if range_clip[0]<range_clip[1]:
                img = np.clip(img,a_min=range_clip[0], a_max=range_clip[1])
            min_intensity = img.min()
            max_intensity = img.max()
            normalized_img = (img - img.min()) / (max_intensity - min_intensity)
        # final remap from [0, 1] to [-1, 1] (applied to both branches)
        normalized_img = normalized_img * 2 - 1
        return normalized_img
| 10,717 | 47.497738 | 190 | py |
easyreg | easyreg-master/easyreg/seg_data_loader_onfly.py | from __future__ import print_function, division
import blosc
import torch
from torch.utils.data import Dataset
from data_pre.seg_data_utils import *
from data_pre.transform import Transform
import SimpleITK as sitk
from multiprocessing import *
blosc.set_nthreads(1)
import progressbar as pb
from copy import deepcopy
import random
import time
class SegmentationDataset(Dataset):
    """segmentation dataset.
    if the data are loaded into memory, we provide data processing option like image resampling and label filtering
    if not, for efficiency, we assume the data are preprocessed and the image resampling still works but the label filtering are disabled

    Images (and labels, when present) are loaded once into memory as
    blosc-packed numpy arrays; training samples are random patches, while
    val/test/debug samples are deterministic overlapping partitions.
    """
    def __init__(self, data_path,phase, transform=None, option = None):
        """
        :param data_path: string, path to processed data
        :param phase: one of 'train' / 'val' / 'test' / 'debug'
        :param transform: function, apply transform on data
        :param option: self-documenting option dict, indexed as option[(key, default, doc)]
        """
        self.data_path = data_path
        self.phase = phase
        self.transform = transform
        ind = ['train', 'val', 'test', 'debug'].index(phase)
        max_num_for_loading=option['max_num_for_loading',(-1,-1,-1,-1),"the max number of pairs to be loaded, set -1 if there is no constraint,[max_train, max_val, max_test, max_debug]"]
        # pick the load cap corresponding to the current phase
        self.max_num_for_loading = max_num_for_loading[ind]
        self.has_label = False
        self.get_file_list()
        self.seg_option = option['seg']
        self.img_after_resize = option[('img_after_resize', [-1, -1, -1], "numpy coordinate, resample the image into desired size")]
        self.normalize_via_percentage_clip = option[('normalize_via_percentage_clip',-1,"normalize the image via percentage clip, the given value is in [0-1]")]
        self.normalize_via_range_clip = option[('normalize_via_range_clip',(-1,-1),"normalize the image via range clip")]
        # a -1 in any dimension disables resampling
        self.img_after_resize = None if any([sz == -1 for sz in self.img_after_resize]) else self.img_after_resize
        self.patch_size = self.seg_option['patch_size']
        self.interested_label_list = self.seg_option['interested_label_list',[-1],"the label to be evaluated, the label not in list will be turned into 0 (background)"]
        self.interested_label_list = None if any([label == -1 for label in self.interested_label_list]) else self.interested_label_list
        self.transform_name_seq = self.seg_option['transform']['transform_seq']
        self.option_p = self.seg_option[('partition', {}, "settings for the partition")]
        self.use_whole_img_as_input = self.seg_option[('use_whole_img_as_input',False,"use whole image as the input")]
        self.load_into_memory = True
        self.img_list = []
        self.img_sz_list = []
        self.original_spacing_list = []
        self.original_sz_list = []
        self.spacing_list = []
        self.label_org_index_list = []
        self.label_converted_index_list = []
        self.label_density_list = []
        if self.load_into_memory:
            self.init_img_pool()
            print('img pool initialized complete')
            if self.phase=='train':
                self.init_corr_transform_pool()
                print('transforms initialized complete')
            else:
                self.init_corr_partition_pool()
                print("partition pool initialized complete")
        blosc.set_nthreads(1)
    def get_file_list(self):
        """
        get the all files belonging to data_type from the data_path,
        :return: full file path list, file name list
        """
        if not os.path.exists(self.data_path):
            self.path_list = []
            self.name_list = []
            self.init_weight_list = []
            return
        self.path_list = read_txt_into_list(os.path.join(self.data_path, 'file_path_list.txt'))
        # a two-column file means (image, label) pairs
        if len(self.path_list[0]) == 2:
            self.has_label = True
        elif self.phase in ["train", "val", "debug"]:
            raise ValueError("the label must be provided during training")
        if not self.has_label:
            self.path_list= [[path] for path in self.path_list]
        file_name_path = os.path.join(self.data_path, 'file_name_list.txt')
        if os.path.isfile(file_name_path):
            self.name_list = read_txt_into_list(file_name_path)
        else:
            # fall back to deriving names from the image file paths
            self.name_list = [get_file_name(self.path_list[i][0]) for i in range(len(self.path_list))]
        if self.max_num_for_loading>0:
            read_num = min(self.max_num_for_loading, len(self.path_list))
            if self.phase=='train':
                # shuffle before truncating so the subset is random
                index =list(range(len(self.path_list)))
                random.shuffle(index)
                self.path_list = [self.path_list[ind] for ind in index ]
                self.name_list = [self.name_list[ind] for ind in index ]
            self.path_list = self.path_list[:read_num]
            self.name_list = self.name_list[:read_num]
        # if len(self.name_list)==0:
        #     self.name_list = ['img_{}'.format(idx) for idx in range(len(self.path_list))]
        self.num_img = len(self.name_list)
    def __read_img_label_into_zipnp(self,img_label_path_dic,img_label_dic):
        """worker: read, resample, normalize, and blosc-pack each image
        (and label) into img_label_dic, keyed by file name"""
        pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=len(img_label_path_dic)).start()
        count = 0
        for fn, img_label_path in img_label_path_dic.items():
            img_label_np_dic = {}
            img_sitk, original_spacing, original_sz = self.__read_and_clean_itk_info(img_label_path['image'])
            resized_img, resize_factor = self.resize_img(img_sitk)
            img_np = sitk.GetArrayFromImage(resized_img)
            img_np = self.normalize_intensity(img_np)
            img_label_np_dic['image'] = blosc.pack_array(img_np.astype(np.float32))
            if self.has_label:
                label_sitk, _, _ = self.__read_and_clean_itk_info(img_label_path['label'])
                resized_label,_ = self.resize_img(label_sitk,is_label=True)
                label_np = sitk.GetArrayFromImage(resized_label)
                label_index = list(np.unique(label_np))
                img_label_np_dic['label'] = blosc.pack_array(label_np.astype(np.int64))
                img_label_np_dic['label_index'] = label_index
            img_after_resize = self.img_after_resize if self.img_after_resize is not None else original_sz
            # spacing scales with the resize so that physical extent is kept
            new_spacing= original_spacing*(original_sz-1)/(np.array(img_after_resize)-1)
            normalized_spacing = self._normalize_spacing(new_spacing,img_after_resize, silent_mode=True)
            img_label_np_dic['original_sz'] =original_sz
            img_label_np_dic['original_spacing'] = original_spacing
            img_label_np_dic['spacing'] = normalized_spacing
            img_label_np_dic['img_sz'] = list(img_np.shape)
            img_label_dic[fn] =img_label_np_dic
            count +=1
            pbar.update(count)
        pbar.finish()
    def _normalize_spacing(self,spacing,sz,silent_mode=False):
        """
        Normalizes spacing.
        :param spacing: Vector with spacing info, in XxYxZ format
        :param sz: size vector in XxYxZ format
        :return: vector with normalized spacings in XxYxZ format
        """
        dim = len(spacing)
        # first determine the largest extent
        current_largest_extent = -1
        extent = np.zeros_like(spacing)
        for d in range(dim):
            current_extent = spacing[d]*(sz[d]-1)
            extent[d] = current_extent
            if current_extent>current_largest_extent:
                current_largest_extent = current_extent
        scalingFactor = 1./current_largest_extent
        normalized_spacing = spacing*scalingFactor
        normalized_extent = extent*scalingFactor
        if not silent_mode:
            print('Normalize spacing: ' + str(spacing) + ' -> ' + str(normalized_spacing))
            print('Normalize spacing, extent: ' + str(extent) + ' -> ' + str(normalized_extent))
        return normalized_spacing
    def __convert_to_standard_label_map(self, label_map, interested_label_list):
        """remap raw label ids to their index in interested_label_list;
        ids not in the list are mapped to background (0)"""
        label_map =blosc.unpack_array(label_map)
        cur_label_list = list(np.unique(label_map)) # unique func orders the elements
        if set(cur_label_list) == set(interested_label_list):
            return label_map
        for l_id in cur_label_list:
            if l_id in interested_label_list:
                st_index = interested_label_list.index(l_id)
            else:
                # assume background label is 0
                st_index = 0
                print("warning label: {} is not in interested label index, and would be convert to 0".format(l_id))
            label_map[np.where(label_map == l_id)] = st_index
        return label_map
    def __get_clean_label(self,img_label_dict, img_name_list):
        """
        standardize all label maps and record per-image label statistics
        (density, original and converted label indices).
        :param img_label_dict: name -> packed image/label dict (mutated in place)
        :param img_name_list: names of the images to process
        :return: the updated img_label_dict
        """
        print(" Attention, the annotation for background is assume to be 0 ! ")
        print(" Attention, we are using the union set of the label! ")
        if self.interested_label_list is None:
            # no explicit list given: use the union of labels seen in all images
            interested_label_set = set()
            for i, fname in enumerate(img_name_list):
                label_set = img_label_dict[fname]['label_index']
                if i ==0:
                    interested_label_set = set(label_set)
                else:
                    interested_label_set = interested_label_set.union(label_set)
            interested_label_list = list(interested_label_set)
        else:
            interested_label_list = self.interested_label_list
        #self.standard_label_index = tuple([int(item) for item in interested_label_list])
        for fname in img_name_list:
            label = img_label_dict[fname]['label']
            label = self.__convert_to_standard_label_map(label, interested_label_list)
            # fraction of voxels per (converted) label id
            label_density = list(np.bincount(label.reshape(-1).astype(np.int32)) / len(label.reshape(-1)))
            img_label_dict[fname]['label'] = blosc.pack_array(label)
            img_label_dict[fname]['label_density']=label_density
            img_label_dict[fname]['label_org_index'] = interested_label_list
            img_label_dict[fname]['label_converted_index'] = list(range(len(interested_label_list)))
        return img_label_dict
    def init_img_pool(self):
        """img pool should include following thing:
        img_label_path_dic:{img_name:{'image':img_fp,'label':label_fp,...}
        img_label_dic: {img_name:{'image':img_np,'label':label_np},......}
        img_list [[s_np,t_np,sl_np,tl_np],....]
        only the img_list need to be used by get_item method
        """
        # training loads with multiple processes; other phases load serially
        use_parallel = self.phase=='train'
        if use_parallel:
            manager = Manager()
            img_label_dic = manager.dict()
            img_label_path_dic = {}
            img_name_list = []
            for i,fps in enumerate(self.path_list):
                fn = self.name_list[i]
                if fn not in img_label_path_dic:
                    if self.has_label:
                        img_label_path_dic[fn] = {'image':fps[0], 'label':fps[1]}
                    else:
                        img_label_path_dic[fn] = {'image':fps[0]}
                img_name_list.append(fn)
            num_of_workers = 4
            num_of_workers = num_of_workers if len(self.name_list)>12 else 2
            split_dict = self.__split_dict(img_label_path_dic,num_of_workers)
            procs =[]
            for i in range(num_of_workers):
                p = Process(target=self.__read_img_label_into_zipnp,args=(split_dict[i], img_label_dic,))
                p.start()
                print("pid:{} start:".format(p.pid))
                procs.append(p)
            for p in procs:
                p.join()
            print("the loading phase finished, total {} img and labels have been loaded".format(len(img_label_dic)))
            img_label_dic=dict(img_label_dic) # todo uncomment manager.dict
        else:
            img_label_dic=dict()
            img_label_path_dic = {}
            img_name_list = []
            for i,fps in enumerate(self.path_list):
                fn = self.name_list[i]
                if fn not in img_label_path_dic:
                    if self.has_label:
                        img_label_path_dic[fn] = {'image': fps[0], 'label': fps[1]}
                    else:
                        img_label_path_dic[fn] = {'image': fps[0]}
                img_name_list.append(fn)
            self.__read_img_label_into_zipnp(img_label_path_dic, img_label_dic) #todo dels
        self.get_organize_structure(img_label_dic,img_name_list)
    def get_organize_structure(self, img_label_dic, img_name_list):
        """flatten the per-name dict into the parallel per-index lists used
        by __getitem__ (img_list, spacing/size lists, label statistics)"""
        if self.has_label:
            img_label_dic = self.__get_clean_label(img_label_dic, img_name_list)
        for fname in img_name_list:
            if self.has_label:
                self.img_list.append([img_label_dic[fname]['image'],
                                       img_label_dic[fname]['label']])
            else:
                self.img_list.append([img_label_dic[fname]['image']])
            self.img_sz_list.append(img_label_dic[fname]['img_sz'])
            self.original_spacing_list.append(img_label_dic[fname]['original_spacing'])
            self.original_sz_list.append(img_label_dic[fname]['original_sz'])
            self.spacing_list.append(img_label_dic[fname]['spacing'])
            if self.has_label:
                self.label_org_index_list.append(img_label_dic[fname]['label_org_index'])
                self.label_converted_index_list.append(img_label_dic[fname]['label_converted_index'])
                self.label_density_list.append(img_label_dic[fname]['label_density'])
        # self.img_list = np.array(self.img_list)
        # self.img_sz_list = np.array(self.img_sz_list)
        # self.original_spacing_list = np.array(self.original_spacing_list)
        # self.original_sz_list = np.array(self.original_sz_list)
        # self.spacing_list = np.array(self.spacing_list)
        # self.label_org_index_list = np.array(self.label_org_index_list)
        # self.label_converted_index_list = np.array(self.label_converted_index_list)
        # self.label_density_list = np.array(self.label_density_list)
    def resize_img(self, img, is_label=False):
        """
        :param img: sitk input, factor is the outputs_ize/patched_sized
        :param is_label: use nearest-neighbor interpolation for label maps
        :return: (resampled sitk image, per-axis resize factor)
        """
        img_sz = img.GetSize()
        if self.img_after_resize is not None:
            img_after_resize = self.img_after_resize
        else:
            img_after_resize = np.flipud(img_sz)
        resize_factor = np.array(img_after_resize) / np.flipud(img_sz)
        spacing_factor = (np.array(img_after_resize)-1) / (np.flipud(img_sz)-1)
        resize = not all([factor == 1 for factor in resize_factor])
        if resize:
            resampler = sitk.ResampleImageFilter()
            dimension = 3
            factor = np.flipud(resize_factor)
            affine = sitk.AffineTransform(dimension)
            matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
            after_size = [round(img_sz[i] * factor[i]) for i in range(dimension)]
            after_size = [int(sz) for sz in after_size]
            # scale each axis by the inverse spacing factor
            matrix[0, 0] = 1. / spacing_factor[0]
            matrix[1, 1] = 1. / spacing_factor[1]
            matrix[2, 2] = 1. / spacing_factor[2]
            affine.SetMatrix(matrix.ravel())
            resampler.SetSize(after_size)
            resampler.SetTransform(affine)
            if is_label:
                resampler.SetInterpolator(sitk.sitkNearestNeighbor)
            else:
                resampler.SetInterpolator(sitk.sitkBSpline)
            img_resampled = resampler.Execute(img)
        else:
            img_resampled = img
        return img_resampled, resize_factor
    def normalize_intensity(self, img):
        """
        a numpy image, normalize into intensity [-1,1]
        (img-img.min())/(img.max() - img.min())
        :param img: image
        :param percen_clip: Linearly normalized image intensities so that the 95-th percentile gets mapped to 0.95; 0 stays 0
        :param range_clip: Linearly normalized image intensities from (range_clip[0], range_clip[1]) to 0,1
        :return
        """
        if self.normalize_via_percentage_clip>0:
            img = img - img.min()
            normalized_img = img / np.percentile(img, 95) * 0.95
        else:
            range_clip = self.normalize_via_range_clip
            if range_clip[0]<range_clip[1]:
                img = np.clip(img,a_min=range_clip[0], a_max=range_clip[1])
            min_intensity = img.min()
            max_intensity = img.max()
            normalized_img = (img - img.min()) / (max_intensity - min_intensity)
        # final remap from [0, 1] to [-1, 1] (applied to both branches)
        normalized_img = normalized_img * 2 - 1
        return normalized_img
    def __read_and_clean_itk_info(self, path):
        """read an itk image and strip its meta info (origin/direction),
        returning (clean image, spacing in numpy order, size in numpy order)"""
        if path is not None:
            img = sitk.ReadImage(path)
            spacing_sitk = img.GetSpacing()
            img_sz_sitk = img.GetSize()
            return sitk.GetImageFromArray(sitk.GetArrayFromImage(img)), np.flipud(spacing_sitk), np.flipud(img_sz_sitk)
        else:
            return None, None, None
    def __read_itk_into_np(self, path):
        """read an itk image file into a numpy array"""
        return sitk.GetArrayFromImage(sitk.ReadImage(path))
    def __split_dict(self, dict_to_split, split_num):
        """split a dict into split_num roughly-equal sub-dicts (for workers)"""
        index_list = list(range(len(dict_to_split)))
        index_split = np.array_split(np.array(index_list), split_num)
        split_dict = []
        dict_to_split_items = list(dict_to_split.items())
        for i in range(split_num):
            dj = dict(dict_to_split_items[index_split[i][0]:index_split[i][-1] + 1])
            split_dict.append(dj)
        return split_dict
    def __convert_np_to_itk_coord(self,coord_list):
        """flip a coordinate list between numpy (z,y,x) and itk (x,y,z) order"""
        return list(np.flipud(np.array(coord_list)))
    def get_transform_seq(self,i):
        """build the per-image training transform sequence (patch sampling etc.)"""
        option_trans = deepcopy(self.seg_option['transform'])
        option_trans['shared_info']['label_list'] = self.label_converted_index_list[i]
        option_trans['shared_info']['label_density'] = self.label_density_list[i]
        option_trans['shared_info']['img_size'] = self.__convert_np_to_itk_coord(self.img_sz_list[i])
        option_trans['shared_info']['num_crop_per_class_per_train_img'] = self.seg_option['num_crop_per_class_per_train_img']
        option_trans['my_bal_rand_crop']['scale_ratio'] = self.seg_option['transform']['my_bal_rand_crop']['scale_ratio']
        option_trans['patch_size'] = self.__convert_np_to_itk_coord(self.seg_option['patch_size'])
        transform = Transform(option_trans)
        return transform.get_transform_seq(self.transform_name_seq)
    def apply_transform(self,sample, transform_seq, rand_label_id=-1):
        """apply each transform in sequence to the sample"""
        for transform in transform_seq:
            sample = transform(sample, rand_label_id)
        return sample
    def init_corr_transform_pool(self):
        """one transform sequence per training image"""
        self.corr_transform_pool = [self.get_transform_seq(i) for i in range(self.num_img)]
    def init_corr_partition_pool(self):
        """one patch-partition object per val/test/debug image"""
        from data_pre.partition import partition
        patch_sz_itk =self.__convert_np_to_itk_coord(self.seg_option['patch_size'])
        overlap_sz_itk =self.__convert_np_to_itk_coord(self.option_p['overlap_size'])
        self.corr_partition_pool = [deepcopy(partition(self.option_p,patch_sz_itk,overlap_sz_itk)) for _ in range(self.num_img)]
    def __len__(self):
        # training oversamples each image 1000x; a fresh random patch is
        # drawn every time in __getitem__
        if self.phase == "train":
            if not self.use_whole_img_as_input:
                return len(self.name_list)*1000
            else:
                return len(self.name_list)
        else:
            return len(self.name_list)
    def __getitem__(self, idx):
        """
        :param idx: id of the items
        :return: the processed data, return as type of dic
        """
        # time-seeded RNG so worker processes don't repeat patch choices
        random_state = np.random.RandomState(int(time.time()))
        rand_label_id =random_state.randint(0,1000)+idx
        idx = idx%self.num_img
        filename = self.name_list[idx]
        zipnp_list = self.img_list[idx]
        spacing = self.spacing_list[idx]
        original_spacing = self.original_spacing_list[idx]
        original_sz = self.original_sz_list[idx]
        if self.has_label:
            img_np, label_np = [blosc.unpack_array(item) for item in zipnp_list]
        else:
            img_np = blosc.unpack_array(zipnp_list[0])
        img_path = self.path_list[idx]
        img_shape = img_np.shape
        if self.phase=="train":
            sample = {'image': [img_np], 'label': label_np}  # here the list is for multi-modality , each mode is an elem in list
            sample = self.apply_transform(sample,self.corr_transform_pool[idx],rand_label_id)
        else:
            if not self.has_label:
                sample = {'image': [img_np]}
            else:
                sample = {'image': [img_np], 'label':label_np}
            if not self.use_whole_img_as_input:
                # split the whole volume into overlapping patches
                sample = self.corr_partition_pool[idx](sample)
            else:
                sample['image'] = np.stack(sample['image'], 0)
        sample['image'] = np.stack(sample['image'], 0)
        sample['img_path'] = img_path
        if self.transform:
            sample['image'] = self.transform(sample['image'])
            if self.has_label:
                sample['label'] = self.transform(sample['label'])
        sample['spacing'] = spacing.copy()
        sample["image_after_resize"] =np.array(img_shape)
        sample['original_sz'] = original_sz.copy()
        sample['original_spacing'] = original_spacing.copy()
        return sample, filename
class ToTensor(object):
    """Wrap a numpy ndarray as a torch tensor (shares memory, no copy)."""
    def __call__(self, sample):
        return torch.from_numpy(sample)
| 21,752 | 44.413361 | 186 | py |
easyreg | easyreg-master/easyreg/multiscale_net.py | """
"""
import copy
from .losses import NCCLoss, Loss
from .net_utils import gen_identity_map
import mermaid.finite_differences_multi_channel as fdt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .net_utils import Bilinear
from mermaid.libraries.modules import stn_nd
from .affine_net import AffineNetSym
from .utils import sigmoid_decay, get_resampled_image
import mermaid.utils as py_utils
import mermaid.smoother_factory as SF
class conv_bn_rel(nn.Module):
    """
    A 3D (transposed-)convolution followed by optional batch norm and an
    optional activation ('relu' / 'elu' / 'leaky_relu'; anything else
    means no activation).
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False,
                 bn=False, reverse=False, group=1, dilation=1):
        super(conv_bn_rel, self).__init__()
        # 'same' padding for odd kernel sizes, otherwise none
        if same_padding:
            padding = int((kernel_size - 1) / 2)
        else:
            padding = 0
        # reverse=True selects a transposed convolution (upsampling path)
        conv_cls = nn.ConvTranspose3d if reverse else nn.Conv3d
        self.conv = conv_cls(in_channels, out_channels, kernel_size, stride,
                             padding=padding, groups=group, dilation=dilation)
        # , eps=0.0001, momentum=0, affine=True
        self.bn = nn.BatchNorm3d(out_channels) if bn else None
        activation_table = {
            'relu': nn.ReLU,
            'elu': nn.ELU,
            'leaky_relu': nn.LeakyReLU,
        }
        if active_unit in activation_table:
            self.active_unit = activation_table[active_unit](inplace=True)
        else:
            self.active_unit = None
    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.active_unit is not None:
            out = self.active_unit(out)
        return out
class Multiscale_Flow(nn.Module):
    """Coarse-to-fine multiscale displacement network.

    A U-Net-style encoder/decoder that predicts a flow field at 1/16
    resolution and progressively refines it at 1/8, 1/4, 1/2 and (when
    ``low_res_factor == 1``) full resolution.  At each scale the coarser
    flow is upsampled, added to an identity map (in [-1, 1] coordinates)
    and used to warp the source image, so each scale only predicts a
    residual correction on top of the coarser estimate.

    NOTE(review): identity maps are created with ``.cuda()`` in __init__,
    so the module assumes a CUDA device is available.
    """
    def __init__(self, img_sz, low_res_factor=1, batch_sz=1, compute_feature_similarity=False, bn=False):
        super(Multiscale_Flow,self).__init__()
        self.img_sz = img_sz
        self.low_res_factor = low_res_factor
        self.compute_feature_similarity = compute_feature_similarity # not support yet in this framework
        # ---- encoder; group=2 keeps source/target streams separate early on
        self.down_path_1 = conv_bn_rel(2, 16, 3, stride=1, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_1 = conv_bn_rel(16, 32, 3, stride=2, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_2 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_3 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=bn,group=2)
        self.down_path_4_1 = conv_bn_rel(32, 64, 3, stride=2, active_unit='relu', same_padding=True, bn=bn,group=2)
        self.down_path_4_2 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=bn,group=2)
        self.down_path_4_3 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=bn,group=2)
        self.down_path_8_1 = conv_bn_rel(64, 128, 3, stride=2, active_unit='relu', same_padding=True, bn=bn,group=2)
        self.down_path_8_2 = conv_bn_rel(128, 128, 3, stride=1, active_unit='relu', same_padding=True, bn=bn,group=2)
        self.down_path_8_3 = conv_bn_rel(128, 128, 3, stride=1, active_unit='relu', same_padding=True, bn=bn,group=2)
        self.down_path_16_1 = conv_bn_rel(128, 256, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_16_2 = conv_bn_rel(256, 256, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        # coarsest flow prediction (1/16 resolution)
        self.flow_conv_16 = conv_bn_rel(256, 3, 3, stride=1, active_unit='None', same_padding=True, bn=False)
        self.upsample_16_8 = Bilinear(zero_boundary=False,using_scale=False)
        # output_size = strides * (input_size-1) + kernel_size - 2*padding
        self.up_path_8_1 = conv_bn_rel(256, 128, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_8_2= conv_bn_rel(128+128+3, 128, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_8_3= conv_bn_rel(128, 128, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.flow_conv_8 = conv_bn_rel(128, 3, 3, stride=1, active_unit='None', same_padding=True, bn=False)
        # identity map at 1/8 resolution, scaled from [0,1] to [-1,1]
        img_sz_8 = [batch_sz,1] +list([int(d/8) for d in self.img_sz])
        spacing_8 = 1/(np.array(img_sz_8[2:])-1)
        id_map_8 = py_utils.identity_map_multiN(img_sz_8, spacing_8)*2-1
        self.id_map_8 = torch.Tensor(id_map_8).cuda()
        self.interp_8 = Bilinear(zero_boundary=False,using_scale=False)
        self.sinterp_8 = Bilinear(zero_boundary=True,using_scale=True)
        self.tinterp_8 = Bilinear(zero_boundary=True,using_scale=True)
        self.upsample_8_4 = Bilinear(zero_boundary=False,using_scale=False)
        self.up_path_4_1 = conv_bn_rel(128, 64, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_4_2 = conv_bn_rel(64+64+3, 32, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_4_3 = conv_bn_rel(32, 32, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.flow_conv_4 = conv_bn_rel(32, 3, 3, stride=1, active_unit='None', same_padding=True, bn=False)
        # identity map at 1/4 resolution
        img_sz_4 = [batch_sz, 1] + list([int(d/4) for d in self.img_sz])
        spacing_4 = 1 / (np.array(img_sz_4[2:]) - 1)
        id_map_4 = py_utils.identity_map_multiN(img_sz_4, spacing_4)*2-1
        self.id_map_4 = torch.Tensor(id_map_4).cuda()
        self.interp_4 = Bilinear(zero_boundary=False,using_scale=False)
        self.sinterp_4 = Bilinear(zero_boundary=True, using_scale=True)
        self.tinterp_4 = Bilinear(zero_boundary=True, using_scale=True)
        self.upsample_4_2 = Bilinear(zero_boundary=False,using_scale=False)
        self.up_path_2_1 = conv_bn_rel(32, 32, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_2_2 = conv_bn_rel(32+32+3, 16, 3, stride=1, active_unit='None', same_padding=True)
        self.up_path_2_3 = conv_bn_rel(16, 16, 3, stride=1, active_unit='None', same_padding=True)
        self.flow_conv_2 = conv_bn_rel(16, 3, 3, stride=1, active_unit='None', same_padding=True, bn=False)
        # identity map at 1/2 resolution
        img_sz_2 = [batch_sz, 1] + list([int(d/2) for d in self.img_sz])
        spacing_2 = 1 / (np.array(img_sz_2[2:]) - 1)
        id_map_2 = py_utils.identity_map_multiN(img_sz_2, spacing_2) * 2 - 1
        self.id_map_2 = torch.Tensor(id_map_2).cuda()
        self.interp_2 = Bilinear(zero_boundary=False,using_scale=False)
        self.sinterp_2 = Bilinear(zero_boundary=True, using_scale=True)
        self.tinterp_2 = Bilinear(zero_boundary=True, using_scale=True)
        self.upsample_2_1 = Bilinear(zero_boundary=False,using_scale=False)
        # identity map at full resolution
        img_sz_1 = [batch_sz, 1] + list([int(d/1) for d in self.img_sz])
        spacing_1 = 1 / (np.array(img_sz_1[2:]) - 1)
        id_map_1 = py_utils.identity_map_multiN(img_sz_1, spacing_1) * 2 - 1
        self.id_map_1 = torch.Tensor(id_map_1).cuda()
        self.interp_1 = Bilinear(zero_boundary=False,using_scale=False)
        self.sinterp_1 = Bilinear(zero_boundary=True, using_scale=True)
        self.tinterp_1 = Bilinear(zero_boundary=True, using_scale=True)
        self.minterp_1 = Bilinear(zero_boundary=False, using_scale=False)
        # full-resolution refinement head only exists when low_res_factor == 1
        if self.low_res_factor==1:
            self.up_path_1_1 = conv_bn_rel(16, 16, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,
                                           reverse=True)
            self.up_path_1_2 = conv_bn_rel(16+16+3, 8, 3, stride=1, active_unit='None', same_padding=True)
            self.up_path_1_3 = conv_bn_rel(8, 8, 3, stride=1, active_unit='None', same_padding=True)
            self.flow_conv_1 = conv_bn_rel(8, 3, 3, stride=1, active_unit='None', same_padding=True, bn=False)
    def forward(self, source,target, initial_map,smoother=None):
        """Run the coarse-to-fine prediction.

        :param source: source image batch
        :param target: target image batch
        :param initial_map: transformation map to compose the predicted
            deformation with (e.g. an affine pre-alignment map)
        :param smoother: optional smoother applied to the final flow
        :return: (flow_list, warp_list, target_list, deformed_map) where
            flow_list holds the per-scale residual flows, warp_list/target_list
            the warped source and resampled target at each scale (coarse to
            fine), and deformed_map the composed full-resolution map.
        """
        input_cat = torch.cat((source, target), dim=1)
        # ---- encoder with residual skips inside each resolution level
        d1 = self.down_path_1(input_cat)
        d2_1 = self.down_path_2_1(d1)
        d2_2 = self.down_path_2_2(d2_1)
        d2_2 = d2_1 + d2_2
        d2_3 = self.down_path_2_3(d2_2)
        d2_3 = d2_1 + d2_3
        d4_1 = self.down_path_4_1(d2_3)
        d4_2 = self.down_path_4_2(d4_1)
        d4_2 = d4_1 + d4_2
        d4_3 = self.down_path_4_3(d4_2)
        d4_3 = d4_2 + d4_3
        d8_1 = self.down_path_8_1(d4_3)
        d8_2 = self.down_path_8_2(d8_1)
        d8_2 = d8_1 + d8_2
        d8_3 = self.down_path_8_3(d8_2)
        d8_3 = d8_2+ d8_3
        d16_1 = self.down_path_16_1(d8_3)
        d16_2 = self.down_path_16_2(d16_1)
        d16_2 = d16_1 + d16_2
        # ---- 1/16 scale: coarsest flow, upsampled to 1/8
        flow_16 = self.flow_conv_16(d16_2)
        flow_16_8 = self.upsample_16_8(flow_16,self.id_map_8)
        deform_field_8 = self.id_map_8 + flow_16_8
        warped_8 = self.sinterp_8(source, deform_field_8)
        target_8 = self.tinterp_8(target, self.id_map_8)
        # ---- 1/8 scale: refine; flow_8 is the total flow at this scale
        u8_1 = self.up_path_8_1(d16_2)
        u8_2 = self.up_path_8_2(torch.cat((d8_3,u8_1,flow_16_8),1))
        u8_3 = self.up_path_8_3(u8_2)
        flow_8 = self.flow_conv_8(u8_3)+ flow_16_8
        flow_8_4 = self.upsample_8_4(flow_8,self.id_map_4)
        deform_field_4 = self.id_map_4 + flow_8_4
        warped_4 = self.sinterp_4(source, deform_field_4)
        target_4 = self.tinterp_4(target, self.id_map_4)
        # ---- 1/4 scale
        u4_1 = self.up_path_4_1(u8_3)
        u4_2 = self.up_path_4_2(torch.cat((d4_3, u4_1, flow_8_4), 1))
        u4_3 = self.up_path_4_3(u4_2)
        flow_4 = self.flow_conv_4(u4_3) +flow_8_4
        flow_4_2 = self.upsample_4_2(flow_4, self.id_map_2)
        deform_field_2 = self.id_map_2 + flow_4_2
        warped_2 = self.sinterp_2(source, deform_field_2)
        target_2 = self.tinterp_2(target, self.id_map_2)
        # ---- 1/2 scale
        u2_1 = self.up_path_2_1(u4_3)
        u2_2 = self.up_path_2_2(torch.cat((d2_3, u2_1, flow_4_2), 1))
        u2_3 = self.up_path_2_3(u2_2)
        flow_2 = self.flow_conv_2(u2_3) + flow_4_2
        flow_2_1 = self.upsample_2_1(flow_2, self.id_map_1)
        # ---- full resolution: either one more refinement or plain upsampling
        if self.low_res_factor==1:
            u1_1 = self.up_path_1_1(u2_3)
            u1_2 = self.up_path_1_2(torch.cat((d1, u1_1, flow_2_1), 1))
            u1_3 = self.up_path_1_3(u1_2)
            flow_1 = self.flow_conv_1(u1_3) + flow_2_1
            sm_flow_1 = flow_1 if smoother is None else smoother.smooth(flow_1)
            deform_field_1 = self.id_map_1 + sm_flow_1
        else:
            flow_1 = flow_2_1
            sm_flow_1 = flow_2_1 if smoother is None else smoother.smooth(flow_2_1)
            deform_field_1 = self.id_map_1 + sm_flow_1
        warped_1 = self.sinterp_1(source, deform_field_1)
        target_1 = target
        # compose the predicted deformation with the initial (affine) map
        deformed_map =self.minterp_1(initial_map,deform_field_1)
        # residual flows per scale (each entry minus the upsampled coarser flow)
        flow_list = [flow_8,flow_4-flow_8_4,flow_2-flow_4_2,flow_1-flow_2_1]
        warp_list = [warped_8,warped_4,warped_2,warped_1]
        target_list = [target_8, target_4,target_2, target_1]
        return flow_list, warp_list, target_list, deformed_map
class Multiscale_FlowNet(nn.Module):
    """Multiscale registration model: optional (pretrained, frozen) affine
    pre-alignment followed by the nonparametric :class:`Multiscale_Flow`
    network, with multiscale similarity and displacement regularization
    losses.

    :param img_sz: volume size, e.g. (80, 192, 192)
    :param opt: task-settings container (the project's parameter-dict type,
        indexed with (key, default, description) tuples)

    Fixes vs the previous revision:
      * ``forward`` tested ``if self.train:`` — ``nn.Module.train`` is a bound
        method and therefore always truthy, so ``print_count`` advanced even
        in eval mode.  It now tests the ``self.training`` flag.
      * ``forward`` crashed when ``source_mask``/``target_mask`` were left at
        their ``None`` defaults; mask-based normalization is now skipped when
        the corresponding mask is absent (behavior is unchanged when masks
        are provided).
    """
    def __init__(self, img_sz, opt=None):
        super(Multiscale_FlowNet, self).__init__()
        self.is_train = opt['tsk_set'][('train',False,'if is in train mode')]
        opt_multiscale_regnet = opt['tsk_set']['reg'][('multiscale_net',{},"settings for the network")]
        batch_sz = opt['tsk_set'][('batch_sz',1,"batch size ")]
        # ---- affine pre-alignment settings
        self.load_trained_affine_net = opt_multiscale_regnet[('load_trained_affine_net',False,'if true load_trained_affine_net; if false, the affine network is not initialized')]
        self.using_affine_init = opt_multiscale_regnet[("using_affine_init",True, "deploy affine network before the nonparametric network")]
        self.affine_init_path = opt_multiscale_regnet[('affine_init_path','',"the path of pretrained affine model")]
        self.affine_refine_step = opt_multiscale_regnet[('affine_refine_step', 5, "the multi-step num in affine refinement")]
        # ---- loss settings
        self.initial_reg_factor = opt_multiscale_regnet[('initial_reg_factor', 1., 'initial regularization factor')]
        self.min_reg_factor = opt_multiscale_regnet[('min_reg_factor', 1., 'minimum of regularization factor')]
        self.low_res_factor = opt_multiscale_regnet[('low_res_factor', 1., 'low_res_factor')]
        self.scale_weight_list = opt_multiscale_regnet[("scale_weight_list",[1.0,0.8,0.6,0.4],"scale_weight_list")]
        self.compute_feature_similarity = opt_multiscale_regnet[("compute_feature_similarity",False,"compute similarity in feature space")]
        self.compute_grad_image_loss = opt_multiscale_regnet[("compute_grad_image_loss",False,"compute similarity between grad image")]
        self.activate_grad_image_after_epoch = opt_multiscale_regnet[("activate_grad_image_after_epoch", 60,"activate_grad_image_after_epoch")]
        self.compute_hess_image_loss = opt_multiscale_regnet[("compute_hess_image_loss", False, "compute similarity between hess image")]
        self.activate_hess_image_after_epoch = opt_multiscale_regnet[("activate_hess_image_after_epoch", 60,"activate_hess_image_after_epoch")]
        self.activate_lncc_after_epoch = opt_multiscale_regnet[("activate_lncc_after_epoch", 100,"activate_lncc_after_epoch")]
        self.deploy_mask_during_training = opt_multiscale_regnet[("deploy_mask_during_training", False,"deploy_mask_during_training")]
        self.img_sz = img_sz
        self.spacing = 1./(np.array(img_sz)-1)
        # displacement is defined on the [-1,1] map, hence twice the spacing
        self.double_spacing = self.spacing*2
        self.network = Multiscale_Flow(img_sz=self.img_sz, low_res_factor=self.low_res_factor,batch_sz=batch_sz)
        self.init_smoother(opt_multiscale_regnet)
        self.sim_fn = NCCLoss()
        self.extern_sim_fn = Loss(opt).criterion
        self.epoch = -1
        self.print_count = 0
        if self.using_affine_init:
            self.init_affine_net(opt)
            self.id_transform = None
        else:
            # identity transform for computing displacement when no affine stage
            self.id_transform = gen_identity_map(self.img_sz, 1.0).cuda()
            print("Attention, the affine net is not used")

    def load_pretrained_model(self, pretrained_model_path):
        """Load a full model checkpoint (state_dict) from disk."""
        checkpoint = torch.load(pretrained_model_path, map_location="cpu")
        self.load_state_dict(checkpoint["state_dict"])
        print("load pretrained model from {}".format(pretrained_model_path))

    def init_smoother(self, opt):
        """Create the flow smoother (from opt) and a fixed gaussian mask smoother."""
        # the output displacement is defined on the transformation map [-1,1]
        self.flow_smoother = SF.SmootherFactory(self.img_sz, self.double_spacing).create_smoother(opt)
        opt_cp = copy.deepcopy(opt)
        opt_cp["smoother"]["type"] = "gaussian"
        opt_cp["smoother"]["gaussian_std"] = 0.1
        self.mask_smoother = SF.SmootherFactory(self.img_sz, self.double_spacing).create_smoother(opt_cp)

    def set_cur_epoch(self, cur_epoch=-1):
        """ set current epoch"""
        self.epoch = cur_epoch

    def set_loss_fn(self, loss_fn):
        """ set loss function"""
        self.loss_fn = loss_fn

    def init_affine_net(self, opt):
        """Build the (frozen) multi-step affine network, optionally loading
        pretrained weights when training."""
        self.affine_net = AffineNetSym(self.img_sz, opt)
        self.affine_net.compute_loss = False
        self.affine_net.epoch_activate_sym = 1e7  # todo to fix this unatural setting
        self.affine_net.set_step(self.affine_refine_step)
        model_path = self.affine_init_path
        if self.load_trained_affine_net and self.is_train:
            checkpoint = torch.load(model_path, map_location='cpu')
            self.affine_net.load_state_dict(checkpoint['state_dict'])
            self.affine_net.cuda()
            print("Affine model is initialized!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        else:
            print(
                "The Affine model is added, but not initialized, this should only take place when a complete checkpoint (including affine model) will be loaded")
        self.affine_net.eval()

    def compute_grad_image(self, image):
        """Finite-difference (forward) gradient of the image, channels stacked as (dx, dy, dz)."""
        fd = fdt.FD_torch_multi_channel(self.spacing)
        dfx = fd.dXf(image)
        dfy = fd.dYf(image)
        dfz = fd.dZf(image)
        grad_image = torch.cat([dfx, dfy, dfz], 1)
        return grad_image

    def compute_hessian_image(self, image):
        """Central second derivatives of the image, channels stacked as (ddx, ddy, ddz)."""
        fd = fdt.FD_torch_multi_channel(self.spacing)
        ddfx = fd.ddXc(image)
        ddfy = fd.ddYc(image)
        ddfz = fd.ddZc(image)
        hess_image = torch.cat([ddfx, ddfy, ddfz], 1)
        return hess_image

    def compute_derivative_image_similarity(self, warp, target, target_mask=None, mode="grad"):
        """NCC similarity between derivative images (gradient or hessian),
        optionally restricted to a smoothed target mask."""
        compute_grad = True if mode == "grad" else False
        derivative_fn = self.compute_grad_image if compute_grad else self.compute_hessian_image
        grad_warp = derivative_fn(warp)
        grad_target = derivative_fn(target)
        if target_mask is not None:
            target_mask = self.mask_smoother.smooth(target_mask)
            target_mask = target_mask.repeat([1, 3, 1, 1, 1])
        sim_loss = NCCLoss()(grad_warp, grad_target, mask=target_mask)
        return sim_loss

    def update_sim_fn(self):
        """Switch from global NCC to the externally-configured similarity
        (e.g. LNCC) once the activation epoch is reached."""
        self.sim_fn = self.sim_fn if self.epoch < self.activate_lncc_after_epoch else self.extern_sim_fn

    def normalize(self, img):
        """Per-sample min-max normalization to [0, 1]."""
        batch = img.shape[0]
        batch_min = torch.min(img.view(batch, -1), dim=1, keepdim=True)[0].view(batch, 1, 1, 1, 1)
        batch_max = torch.max(img.view(batch, -1), dim=1, keepdim=True)[0].view(batch, 1, 1, 1, 1)
        img = (img - batch_min) / (batch_max - batch_min)
        return img

    def forward(self, source, target, source_mask=None, target_mask=None):
        """Register source to target.

        :return: (warped_source, composed_transformation_map, affine_image)
        Side effects: stores sim/reg losses and intermediate results on self.
        """
        self.update_sim_fn()
        if self.using_affine_init:
            with torch.no_grad():
                affine_img, affine_map, _ = self.affine_net(source, target)
        else:
            affine_map = self.id_transform.clone()
            affine_img = source
        # Mask-restricted renormalization to [-1,1]; skipped when a mask is
        # not provided (the previous revision crashed on the None defaults).
        if target_mask is not None:
            target = self.normalize((target + 1) * target_mask) * 2 - 1
        if source_mask is not None:
            affined_mask = Bilinear()(source_mask, affine_map)
            affine_img = self.normalize((affine_img + 1) * affined_mask) * 2 - 1
        disp_list, warp_list, target_list, phi = self.network(affine_img, target, affine_map, self.flow_smoother)
        # per-scale similarity and regularization, weighted coarse-to-fine
        sim_loss_list = [self.sim_fn(cur_warp, cur_targ) for cur_warp, cur_targ in zip(warp_list, target_list)]
        spacing_list = [2 / (np.array(disp.shape[2:]) - 1) for disp in disp_list]
        reg_loss_list = [self.reg_fn(disp, spacing) for disp, spacing in zip(disp_list, spacing_list)]
        sim_loss = sum([w * sim for w, sim in zip(self.scale_weight_list, sim_loss_list)])
        reg_loss = sum([w * reg for w, reg in zip(self.scale_weight_list, reg_loss_list)])
        if self.compute_grad_image_loss and self.epoch > self.activate_grad_image_after_epoch:
            sim_loss = sim_loss + self.compute_derivative_image_similarity(warp_list[-1], target, target_mask, mode="grad")
        if self.compute_hess_image_loss and self.epoch > self.activate_hess_image_after_epoch:
            sim_loss = sim_loss + self.compute_derivative_image_similarity(warp_list[-1], target, target_mask, mode="hess")
        composed_deformed = phi
        self.sim_loss = sim_loss
        self.reg_loss = reg_loss
        self.warped = warp_list[-1]
        self.target = target_list[-1]
        self.disp_field = disp_list[-1]
        self.source = source
        # BUGFIX: was `if self.train:` — nn.Module.train is a method and is
        # always truthy; use the nn.Module `training` flag instead.
        if self.training:
            self.print_count += 1
        return self.warped, composed_deformed, affine_img

    def get_extra_to_plot(self):
        return None, None

    def check_if_update_lr(self):
        return False, None

    def reg_fn(self, disp, spacing):
        """Mean squared displacement magnitude (spacing kept for interface
        compatibility; not used by this regularizer)."""
        l2 = disp ** 2
        reg = l2.mean()
        return reg

    def get_sim_loss(self):
        return self.sim_loss

    def weights_init(self):
        """Xavier-initialize all conv weights and zero their biases."""
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                if m.weight is not None:
                    nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def get_reg_factor(self):
        """Sigmoid-decayed regularization factor, floored at min_reg_factor."""
        factor = self.initial_reg_factor
        factor = float(max(sigmoid_decay(self.epoch, static=5, k=4) * factor, self.min_reg_factor))
        return factor

    def get_loss(self):
        """Total loss = similarity + decayed_factor * regularization
        (prints every 10th call)."""
        reg_factor = self.get_reg_factor()
        sim_loss = self.sim_loss
        reg_loss = self.reg_loss
        if self.print_count % 10 == 0:
            print('current sim loss is{}, current_reg_loss is {}, and reg_factor is {} '.format(sim_loss.item(),
                                                                                                reg_loss.item(),
                                                                                                reg_factor))
        return sim_loss + reg_factor * reg_loss

    def get_inverse_map(self, use_01=False):
        """Numerically approximate the inverse map by registering target to source."""
        # TODO not test yet
        print("VoxelMorph approach doesn't support analytical computation of inverse map")
        print("Instead, we compute it's numerical approximation")
        _, inverse_map, _ = self.forward(self.target, self.source)
        return inverse_map
| 21,795 | 50.649289 | 178 | py |
easyreg | easyreg-master/easyreg/modules.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mermaid.utils as py_utils
import torch
from .net_utils import *
class Affine_unet(nn.Module):
    """Small siamese CNN regressing a 12-parameter 3D affine transform
    from a moving/target image pair (shared first-layer weights)."""

    def __init__(self):
        super(Affine_unet, self).__init__()
        # (W−F+2P)/S+1, W - input size, F - filter size, P - padding size, S - stride.
        self.down_path_1 = conv_bn_rel(1, 16, 3, stride=1, active_unit='relu', same_padding=True, bn=False)
        self.down_path_2 = MaxPool(2, 2)
        self.down_path_4 = conv_bn_rel(32, 16, 3, stride=2, active_unit='relu', same_padding=True, bn=False)
        self.down_path_8 = MaxPool(2, 2)
        self.down_path_16 = conv_bn_rel(16, 4, 3, stride=2, active_unit='relu', same_padding=True, bn=False)
        self.down_path_32 = MaxPool(2, 2)
        # 4*2*6*6 is the flattened feature size for the expected input volume
        self.fc_1 = FcRel(4 * 2 * 6 * 6, 32, active_unit='relu')
        self.fc_2 = FcRel(32, 12, active_unit='None')

    def forward(self, m, t):
        # Encode both images with the same first conv, then fuse channel-wise.
        feat_m = self.down_path_1(m)
        feat_t = self.down_path_1(t)
        fused = torch.cat((feat_m, feat_t), 1)
        fused = self.down_path_2(fused)
        fused = self.down_path_4(fused)
        fused = self.down_path_8(fused)
        fused = self.down_path_16(fused)
        fused = self.down_path_32(fused)
        batch = fused.shape[0]
        hidden = self.fc_1(fused.view(batch, -1))
        return self.fc_2(hidden).view((batch, -1))
class Affine_unet_im(nn.Module):
    """Siamese encoder regressing 12 affine parameters from an image pair.

    Both inputs go through the same first conv layer; the concatenated
    features are reduced by two sequential stages, then two FC layers emit
    the flattened 3x4 affine matrix.

    :param use_identity: kept for interface compatibility (unused here)
    :param fc_size: flattened feature size entering the first FC layer;
        depends on the input volume size (e.g. oai 4*3*6*6, lung 4*5*5*5,
        oasis 4*4*4*4, brats 4*3*3*3)
    """

    def __init__(self, use_identity=False, fc_size=4*6*6*5):
        super(Affine_unet_im, self).__init__()
        self.down_path_1 = conv_bn_rel(1, 16, 3, stride=1, active_unit='relu', same_padding=True, bn=False)
        self.down_path_2_1 = MaxPool(2, 2)
        self.down_path_2_2 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False)
        self.down_path_4_1 = conv_bn_rel(32, 32, 3, stride=2, active_unit='relu', same_padding=True, bn=False)
        self.down_path_4_2 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False)
        self.down_path_1_t_4 = nn.Sequential(self.down_path_2_1, self.down_path_2_2,
                                             self.down_path_4_1, self.down_path_4_2)
        self.down_path_8_1 = MaxPool(2, 2)
        self.down_path_8_2 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False)
        self.down_path_16_1 = conv_bn_rel(32, 16, 3, stride=2, active_unit='relu', same_padding=True, bn=False)
        self.down_path_16_2 = conv_bn_rel(16, 16, 3, stride=1, active_unit='relu', same_padding=True, bn=False)
        self.down_path_32 = conv_bn_rel(16, 4, 3, stride=2, active_unit='relu', same_padding=True, bn=False)
        self.down_path_4_t_32 = nn.Sequential(self.down_path_8_1, self.down_path_8_2,
                                              self.down_path_16_1, self.down_path_16_2,
                                              self.down_path_32)
        self.fc_1 = FcRel(fc_size, 32, active_unit='relu')
        self.fc_2 = FcRel(32, 12, active_unit='None')
        self.identityMap = None  # unused placeholder, kept for compatibility

    def forward(self, m, t):
        feat_m = self.down_path_1(m)
        feat_t = self.down_path_1(t)
        fused = torch.cat((feat_m, feat_t), 1)
        fused = self.down_path_1_t_4(fused)
        fused = self.down_path_4_t_32(fused)
        batch = fused.shape[0]
        hidden = self.fc_1(fused.view(batch, -1))
        return self.fc_2(hidden).view((batch, -1))
class MomentumGen(nn.Module):
    """U-Net that predicts a 3-channel momentum/vector field from a stacked
    source/target pair (2 input channels).

    The decoder's final layers depend on ``low_res_factor``: full resolution
    (1 / None / [1.,1.,1.]) or half resolution (0.5).
    NOTE(review): forward only checks ``low_res_factor == 1`` and ``== 0.5``;
    the ``None``/``[1.,1.,1.]`` configurations accepted by __init__ fall
    through and return None — verify against callers.
    """
    def __init__(self, low_res_factor=1):
        super(MomentumGen,self).__init__()
        self.low_res_factor = low_res_factor
        # encoder; group=2 keeps the two input streams separate in early layers
        self.down_path_1 = conv_bn_rel(2, 16, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=False,group=2)
        self.down_path_2 = conv_bn_rel(16, 32, 3, stride=2, active_unit='leaky_relu', same_padding=True, bn=False,group=2)
        self.down_path_4 = conv_bn_rel(32, 32, 3, stride=2, active_unit='leaky_relu', same_padding=True, bn=False)
        self.down_path_8 = conv_bn_rel(32, 32, 3, stride=2, active_unit='leaky_relu', same_padding=True, bn=False)
        self.down_path_16 = conv_bn_rel(32, 32, 3, stride=2, active_unit='leaky_relu', same_padding=True, bn=False)
        # output_size = strides * (input_size-1) + kernel_size - 2*padding
        self.up_path_8 = conv_bn_rel(32, 32, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=False,
                                     reverse=True)
        self.up_path_4 = conv_bn_rel(64, 32, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=False,
                                     reverse=True)
        self.up_path_2_1 = conv_bn_rel(64, 32, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=False,
                                       reverse=True)
        if low_res_factor==1 or low_res_factor==None or low_res_factor ==[1.,1.,1.]:
            # full-resolution head
            self.up_path_2_2 = conv_bn_rel(64, 8, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=False)
            self.up_path_1_1 = conv_bn_rel(8, 8, 2, stride=2, active_unit='None', same_padding=False, bn=False, reverse=True)
            self.up_path_1_2 = conv_bn_rel(24, 3, 3, stride=1, active_unit='None', same_padding=True, bn=False)
        elif low_res_factor ==0.5 :
            # half-resolution head
            self.up_path_2_2 = conv_bn_rel(32, 3, 3, stride=1, active_unit='None', same_padding=True, bn=False)
    def forward(self, x):
        output = None
        d1 = self.down_path_1(x)
        d2 = self.down_path_2(d1)
        d4 = self.down_path_4(d2)
        d8 = self.down_path_8(d4)
        d16 = self.down_path_16(d8)
        u8 = self.up_path_8(d16)
        u4 = self.up_path_4(torch.cat((u8, d8), 1))
        # free skip tensors as soon as they are consumed to reduce peak memory
        del d8
        u2_1 = self.up_path_2_1(torch.cat((u4, d4), 1))
        del d4
        if self.low_res_factor==1:
            u2_2 = self.up_path_2_2(torch.cat((u2_1, d2), 1))
            del d2
            u1_1 = self.up_path_1_1(u2_2)
            output = self.up_path_1_2(torch.cat((u1_1, d1), 1))
            del d1
        elif self.low_res_factor==0.5:
            output = self.up_path_2_2(u2_1)
        return output
class MomentumGen_im(nn.Module):
    """U-Net predicting a 3-channel momentum/vector field at half resolution
    from a stacked source/target pair (2 input channels).

    Only ``low_res_factor == 0.5`` is implemented in forward; other values
    raise ValueError.  (The previous revision used ``raise('…')``, which in
    Python 3 itself raises ``TypeError: exceptions must derive from
    BaseException`` — replaced with a proper exception.)

    :param low_res_factor: output resolution factor; only 0.5 is supported
    :param bn: enable batch norm in the deeper layers
    """
    def __init__(self, low_res_factor=1, bn=False):
        super(MomentumGen_im, self).__init__()
        self.low_res_factor = low_res_factor
        # encoder; group=2 keeps the two input streams separate early on
        self.down_path_1 = conv_bn_rel(2, 16, 3, stride=1, active_unit='relu', same_padding=True, bn=False, group=2)
        self.down_path_2_1 = conv_bn_rel(16, 32, 3, stride=2, active_unit='relu', same_padding=True, bn=False, group=2)
        self.down_path_2_2 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False, group=2)
        self.down_path_4_1 = conv_bn_rel(32, 32, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_2 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_1 = conv_bn_rel(32, 64, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_2 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_16 = conv_bn_rel(64, 64, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        # decoder; output_size = strides * (input_size-1) + kernel_size - 2*padding
        self.up_path_8_1 = conv_bn_rel(64, 64, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn, reverse=True)
        self.up_path_8_2 = conv_bn_rel(128, 64, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_4_1 = conv_bn_rel(64, 64, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn, reverse=True)
        self.up_path_4_2 = conv_bn_rel(96, 32, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_2_1 = conv_bn_rel(32, 32, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn, reverse=True)
        if low_res_factor == 1 or low_res_factor is None or low_res_factor == [1., 1., 1.]:
            # full-resolution head (declared for compatibility; forward does
            # not implement this path and will raise)
            self.up_path_2_2 = conv_bn_rel(64, 8, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
            self.up_path_1_1 = conv_bn_rel(8, 8, 2, stride=2, active_unit='None', same_padding=False, bn=bn, reverse=True)
            self.up_path_1_2 = conv_bn_rel(24, 3, 3, stride=1, active_unit='None', same_padding=True, bn=bn)
        elif low_res_factor == 0.5:
            # half-resolution head
            self.up_path_2_2 = conv_bn_rel(64, 16, 3, stride=1, active_unit='None', same_padding=True)
            self.up_path_2_3 = conv_bn_rel(16, 3, 3, stride=1, active_unit='None', same_padding=True)

    def forward(self, x):
        # Fail fast on unsupported configurations (bugfix: the old code
        # executed `raise('…')` — raising a plain string — and only after
        # computing the whole forward pass).
        if self.low_res_factor != 0.5:
            raise ValueError('for now. only half sz downsampling is supported')
        d1 = self.down_path_1(x)
        d2_1 = self.down_path_2_1(d1)
        d2_2 = self.down_path_2_2(d2_1)
        d4_1 = self.down_path_4_1(d2_2)
        d4_2 = self.down_path_4_2(d4_1)
        d8_1 = self.down_path_8_1(d4_2)
        d8_2 = self.down_path_8_2(d8_1)
        d16 = self.down_path_16(d8_2)
        u8_1 = self.up_path_8_1(d16)
        u8_2 = self.up_path_8_2(torch.cat((d8_2, u8_1), 1))
        u4_1 = self.up_path_4_1(u8_2)
        u4_2 = self.up_path_4_2(torch.cat((d4_2, u4_1), 1))
        u2_1 = self.up_path_2_1(u4_2)
        u2_2 = self.up_path_2_2(torch.cat((d2_2, u2_1), 1))
        output = self.up_path_2_3(u2_2)
        return output
class MomentumGen_resid(nn.Module):
    """Residual U-Net predicting a 3-channel momentum/vector field at half
    resolution from a stacked source/target pair.

    Each encoder/decoder level applies two convs with additive residual
    connections.  Only ``low_res_factor == 0.5`` is supported; other values
    raise ValueError.  (Bugfix: the previous ``raise('…')`` raised a plain
    string, which in Python 3 is a ``TypeError`` rather than the intended
    error.)

    :param low_res_factor: output resolution factor; only 0.5 is supported
    :param bn: enable batch norm in the deeper layers
    :param adaptive_mode: unused; kept for interface compatibility
    """
    def __init__(self, low_res_factor=1, bn=False, adaptive_mode=False):
        super(MomentumGen_resid, self).__init__()
        self.low_res_factor = low_res_factor
        # encoder; group=2 keeps the two input streams separate early on
        self.down_path_1 = conv_bn_rel(2, 16, 3, stride=1, active_unit='relu', same_padding=True, bn=False, group=2)
        self.down_path_2_1 = conv_bn_rel(16, 32, 3, stride=2, active_unit='relu', same_padding=True, bn=False, group=2)
        self.down_path_2_2 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False, group=2)
        self.down_path_2_3 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_1 = conv_bn_rel(32, 64, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_2 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_3 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_1 = conv_bn_rel(64, 128, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_2 = conv_bn_rel(128, 128, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_3 = conv_bn_rel(128, 128, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_16_1 = conv_bn_rel(128, 256, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_16_2 = conv_bn_rel(256, 256, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        # decoder; output_size = strides * (input_size-1) + kernel_size - 2*padding
        self.up_path_8_1 = conv_bn_rel(256, 128, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn, reverse=True)
        self.up_path_8_2 = conv_bn_rel(128 + 128, 128, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_8_3 = conv_bn_rel(128, 128, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_4_1 = conv_bn_rel(128, 64, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn, reverse=True)
        self.up_path_4_2 = conv_bn_rel(64 + 64, 32, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_4_3 = conv_bn_rel(32, 32, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_2_1 = conv_bn_rel(32, 32, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn, reverse=True)
        self.up_path_2_2 = conv_bn_rel(32 + 32, 16, 3, stride=1, active_unit='None', same_padding=True)
        self.up_path_2_3 = conv_bn_rel(16, 3, 3, stride=1, active_unit='None', same_padding=True)

    def forward(self, x):
        # Fail fast on unsupported configurations (bugfix: the old code
        # executed `raise('…')` — raising a plain string — and only after
        # computing the whole forward pass).
        if self.low_res_factor != 0.5:
            raise ValueError('for now. only half sz downsampling is supported')
        d1 = self.down_path_1(x)
        # encoder with additive residual connections inside each level
        d2_1 = self.down_path_2_1(d1)
        d2_2 = self.down_path_2_2(d2_1)
        d2_2 = d2_1 + d2_2
        d2_3 = self.down_path_2_3(d2_2)
        d2_3 = d2_1 + d2_3
        d4_1 = self.down_path_4_1(d2_3)
        d4_2 = self.down_path_4_2(d4_1)
        d4_2 = d4_1 + d4_2
        d4_3 = self.down_path_4_3(d4_2)
        d4_3 = d4_2 + d4_3
        d8_1 = self.down_path_8_1(d4_3)
        d8_2 = self.down_path_8_2(d8_1)
        d8_2 = d8_1 + d8_2
        d8_3 = self.down_path_8_3(d8_2)
        d8_3 = d8_2 + d8_3
        d16_1 = self.down_path_16_1(d8_3)
        d16_2 = self.down_path_16_2(d16_1)
        d16_2 = d16_1 + d16_2
        # decoder with skip connections and residuals
        u8_1 = self.up_path_8_1(d16_2)
        u8_2 = self.up_path_8_2(torch.cat((d8_3, u8_1), 1))
        u8_3 = self.up_path_8_3(u8_2)
        u8_3 = u8_2 + u8_3
        u4_1 = self.up_path_4_1(u8_3)
        u4_2 = self.up_path_4_2(torch.cat((d4_3, u4_1), 1))
        u4_3 = self.up_path_4_3(u4_2)
        u4_3 = u4_2 + u4_3
        u2_1 = self.up_path_2_1(u4_3)
        u2_2 = self.up_path_2_2(torch.cat((d2_3, u2_1), 1))
        output = self.up_path_2_3(u2_2)
        return output
class Seg_resid(nn.Module):
    """Residual U-Net for 3D segmentation: single-channel input,
    ``num_class`` output channels (unnormalized scores; no softmax here).

    :param num_class: number of output segmentation classes
    :param bn: enable batch norm in the deeper layers
    """
    def __init__(self, num_class, bn=False):
        super(Seg_resid,self).__init__()
        # encoder
        self.down_path_1 = conv_bn_rel(1, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_1 = conv_bn_rel(32, 64, 3, stride=2, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_2 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_3 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_1 = conv_bn_rel(64, 128, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_2 = conv_bn_rel(128,128, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_3 = conv_bn_rel(128, 128, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_1 = conv_bn_rel(128, 256, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_2 = conv_bn_rel(256, 256, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_3 = conv_bn_rel(256, 256, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        # decoder (transposed convs upsample back to full resolution)
        self.up_path_4_1 = conv_bn_rel(256, 128, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_4_2 = conv_bn_rel(128+128, 128, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_4_3 = conv_bn_rel(128, 128, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_2_1 = conv_bn_rel(128, 128, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_2_2 = conv_bn_rel(128+64, 96, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_2_3 = conv_bn_rel(96, 96, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_1_1 = conv_bn_rel(96, 96, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_1_2 = conv_bn_rel(96+32, 64, 3, stride=1, active_unit='leaky_relu',same_padding=True, bn=bn)
        self.up_path_1_3 = conv_bn_rel(64, num_class, 3, stride=1, active_unit='leaky_relu', same_padding=True)
    def forward(self, x):
        d1 = self.down_path_1(x)
        # encoder with additive residual connections inside each level
        d2_1 = self.down_path_2_1(d1)
        d2_2 = self.down_path_2_2(d2_1)
        d2_2 = d2_1 + d2_2
        d2_3 = self.down_path_2_3(d2_2)
        d2_3 = d2_1 + d2_3
        d4_1 = self.down_path_4_1(d2_3)
        d4_2 = self.down_path_4_2(d4_1)
        d4_2 = d4_1 + d4_2
        d4_3 = self.down_path_4_3(d4_2)
        d4_3 = d4_2 + d4_3
        d8_1 = self.down_path_8_1(d4_3)
        d8_2 = self.down_path_8_2(d8_1)
        d8_2 = d8_1 + d8_2
        d8_3 = self.down_path_8_3(d8_2)
        d8_3 = d8_2+ d8_3
        # decoder with skip connections
        u4_1 = self.up_path_4_1(d8_3)
        u4_2 = self.up_path_4_2(torch.cat((d4_3,u4_1),1))
        u4_3 = self.up_path_4_3(u4_2)
        u4_3 = u4_2 + u4_3
        u2_1 = self.up_path_2_1(u4_3)
        u2_2 = self.up_path_2_2(torch.cat((d2_3, u2_1), 1))
        u2_3 = self.up_path_2_3(u2_2)
        u1_1 = self.up_path_1_1(u2_3)
        u1_2 = self.up_path_1_2(torch.cat((d1, u1_1), 1))
        u1_3 = self.up_path_1_3(u1_2)
        output = u1_3
        return output
class Seg_resid_imp(nn.Module):
    """Residual U-Net style segmentation network with a 1/16-resolution bottleneck.

    Compared with the shallower sibling network it adds one more
    down-sampling stage (``down_path_16_*``) and the matching
    ``up_path_8_*`` decoder stage.

    :param num_class: number of output segmentation classes
    :param bn: whether to use batch normalization in the (deeper) conv blocks
    """
    def __init__(self, num_class, bn=False):
        super(Seg_resid_imp,self).__init__()
        # encoder: the first levels use grouped convs and no BN regardless of `bn`
        self.down_path_1 = conv_bn_rel(1, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_1 = conv_bn_rel(32, 32, 3, stride=2, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_2 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=False,group=2)
        self.down_path_2_3 = conv_bn_rel(32, 32, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_1 = conv_bn_rel(32, 64, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_2 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_4_3 = conv_bn_rel(64, 64, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_1 = conv_bn_rel(64, 128, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_2 = conv_bn_rel(128, 128, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_8_3 = conv_bn_rel(128, 128, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_16_1 = conv_bn_rel(128, 256, 3, stride=2, active_unit='relu', same_padding=True, bn=bn)
        self.down_path_16_2 = conv_bn_rel(256, 256, 3, stride=1, active_unit='relu', same_padding=True, bn=bn)
        # decoder; for transposed convs: output_size = strides * (input_size-1) + kernel_size - 2*padding
        self.up_path_8_1 = conv_bn_rel(256, 128, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_8_2 = conv_bn_rel(128+128, 128, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_8_3 = conv_bn_rel(128, 128, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_4_1 = conv_bn_rel(128, 64, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_4_2 = conv_bn_rel(64+64, 64, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_4_3 = conv_bn_rel(64, 64, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_2_1 = conv_bn_rel(64, 64, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_2_2 = conv_bn_rel(64+32, 48, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_2_3 = conv_bn_rel(48, 48, 3, stride=1, active_unit='leaky_relu', same_padding=True, bn=bn)
        self.up_path_1_1 = conv_bn_rel(48, 48, 2, stride=2, active_unit='leaky_relu', same_padding=False, bn=bn,reverse=True)
        self.up_path_1_2 = conv_bn_rel(48+32, 64, 3, stride=1, active_unit='leaky_relu',same_padding=True, bn=bn)
        self.up_path_1_3 = conv_bn_rel(64, num_class, 3, stride=1, active_unit='leaky_relu', same_padding=True)
    def forward(self, x):
        """Return per-voxel class scores for input batch ``x`` (B x 1 x X x Y x Z)."""
        d1 = self.down_path_1(x)
        d2_1 = self.down_path_2_1(d1)
        d2_2 = self.down_path_2_2(d2_1)
        d2_2 = d2_1 + d2_2
        d2_3 = self.down_path_2_3(d2_2)
        d2_3 = d2_1 + d2_3
        d4_1 = self.down_path_4_1(d2_3)
        d4_2 = self.down_path_4_2(d4_1)
        d4_2 = d4_1 + d4_2
        d4_3 = self.down_path_4_3(d4_2)
        d4_3 = d4_2 + d4_3
        d8_1 = self.down_path_8_1(d4_3)
        d8_2 = self.down_path_8_2(d8_1)
        d8_2 = d8_1 + d8_2
        d8_3 = self.down_path_8_3(d8_2)
        d8_3 = d8_2 + d8_3
        d16_1 = self.down_path_16_1(d8_3)
        d16_2 = self.down_path_16_2(d16_1)
        d16_2 = d16_1 + d16_2
        u8_1 = self.up_path_8_1(d16_2)
        u8_2 = self.up_path_8_2(torch.cat((d8_3, u8_1), 1))
        u8_3 = self.up_path_8_3(u8_2)
        u8_3 = u8_2 + u8_3
        # BUGFIX: the 1/4-level decoder input must come from the 1/8-level
        # decoder output (u8_3). A stale copy-paste line previously
        # overwrote u4_1 with self.up_path_4_1(d8_3), which bypassed the
        # 1/16 bottleneck and the whole up_path_8_* stage entirely.
        u4_1 = self.up_path_4_1(u8_3)
        u4_2 = self.up_path_4_2(torch.cat((d4_3, u4_1), 1))
        u4_3 = self.up_path_4_3(u4_2)
        u4_3 = u4_2 + u4_3
        u2_1 = self.up_path_2_1(u4_3)
        u2_2 = self.up_path_2_2(torch.cat((d2_3, u2_1), 1))
        u2_3 = self.up_path_2_3(u2_2)
        u1_1 = self.up_path_1_1(u2_3)
        u1_2 = self.up_path_1_2(torch.cat((d1, u1_1), 1))
        u1_3 = self.up_path_1_3(u1_2)
        output = u1_3
        return output
| 21,891 | 53.593516 | 127 | py |
easyreg | easyreg-master/easyreg/brainstorm.py | """
Framework described in
Data augmentation using learned transformations for one-shot medical image segmentation
http://www.mit.edu/~adalca/files/papers/cvpr2019_brainstorm.pdf
"""
from .net_utils import gen_identity_map
import mermaid.finite_differences as fdt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .net_utils import Bilinear
import pynd.segutils as pynd_segutils
class convBlock(nn.Module):
    """
    A convolutional block: conv -> (BN) -> nonlinearity -> (residual add) -> (max-pool).
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                 bias=True, batchnorm=False, residual=False, max_pool=False, nonlinear=nn.LeakyReLU(0.2)):
        """
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param kernel_size: conv kernel size
        :param stride: conv stride
        :param padding: conv padding
        :param bias: whether the conv has a bias term
        :param batchnorm: whether to apply BatchNorm3d after the conv
        :param residual: whether to add the block input to the output
            (requires in_channels == out_channels and stride == 1)
        :param max_pool: whether to additionally return a 2x max-pooled output
        :param nonlinear: activation module, or None to disable
        """
        super(convBlock, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
        self.bn = nn.BatchNorm3d(out_channels) if batchnorm else None
        self.nonlinear = nonlinear
        self.residual = residual
        self.max_pool = nn.MaxPool3d(kernel_size=(2,2,2),stride=2) if max_pool else None
    def forward(self, x):
        """Return the activated features, or (features, pooled features) if max_pool."""
        identity = x
        x = self.conv(x)
        if self.bn:
            x = self.bn(x)
        if self.nonlinear:
            x = self.nonlinear(x)
        if self.residual:
            # BUGFIX: this previously did `x += x`, which merely doubles the
            # activation. A residual connection adds the block *input*.
            x = x + identity
        if not self.max_pool:
            return x
        else:
            y = self.max_pool(x)
            return x, y
class TransformCVPR2019(nn.Module):
    """
    Spatial-transform network of the CVPR 2019 "brainstorm" augmentation
    framework: a U-Net that predicts a dense displacement field mapping a
    source (atlas) volume onto a target, trained with a similarity loss plus
    an l2 smoothness regularizer on the displacement.

    :param img_sz: volume size. e.g. (256, 256, 256)
    :param opt: ParameterDict with the task settings
    """
    def __init__(self, img_sz, opt=None):
        super(TransformCVPR2019, self).__init__()
        self.is_train = opt['tsk_set'][('train',False,'if is in train mode')]
        opt_voxelmorph = opt['tsk_set']['reg']['aug_trans_net']
        self.initial_reg_factor = opt_voxelmorph[('initial_reg_factor', 1., 'initial regularization factor')]
        enc_filters = [16, 32, 32, 32]
        dec_filters = [32, 32, 32, 32, 32, 16, 16]
        self.enc_filter = enc_filters
        self.dec_filter = dec_filters
        input_channel = 2   # source and target are concatenated on the channel axis
        output_channel = 3  # one displacement component per spatial dimension
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.img_sz = img_sz
        self.spacing = 1. / (np.array(img_sz) - 1)
        self.loss_fn = None  # set later via set_loss_fn
        self.epoch = -1
        self.print_count = 0
        self.id_transform = gen_identity_map(self.img_sz, 1.0).cuda()
        self.encoders = nn.ModuleList()
        self.decoders = nn.ModuleList()
        self.bilinear = Bilinear(zero_boundary=True)
        for i in range(len(enc_filters)):
            if i == 0:
                self.encoders.append(convBlock(input_channel, enc_filters[i], stride=1, max_pool=True, bias=True))
            if i > 0 and i < len(enc_filters)-1:
                self.encoders.append(convBlock(enc_filters[i-1], enc_filters[i], stride=1, max_pool=True, bias=True))
            if i == len(enc_filters)-1:
                self.encoders.append(convBlock(enc_filters[i-1], enc_filters[i], stride=1, max_pool=False, bias=True))
        self.decoders.append(convBlock(enc_filters[3] + enc_filters[2], dec_filters[0], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[0] + enc_filters[1], dec_filters[1], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[1] + enc_filters[0], dec_filters[2], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[2], dec_filters[3], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[3], dec_filters[4], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[4], dec_filters[5], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[5], dec_filters[6], stride=1, bias=True))
        self.final_conv = nn.Conv3d(dec_filters[6], output_channel, kernel_size=3, stride=1, padding=1, bias=True)
        self.flow = nn.Conv3d(output_channel, output_channel, kernel_size=3, stride=1, padding=1, bias=True)
    def set_loss_fn(self, loss_fn):
        """ set loss function"""
        self.loss_fn = loss_fn
    def set_cur_epoch(self, cur_epoch=-1):
        """ set current epoch"""
        self.epoch = cur_epoch
    def forward(self, source, target, source_mask=None, target_mask=None):
        """
        Predict the deformation from source to target and warp the source.

        :param source: moving image, B x 1 x X x Y x Z
        :param target: fixed image, B x 1 x X x Y x Z
        :param source_mask: unused, kept for interface compatibility
        :param target_mask: unused, kept for interface compatibility
        :return: (warped source, deformation map, displacement field)
        """
        id_map = self.id_transform.clone()
        x_enc_0, x = self.encoders[0](torch.cat((source, target), dim=1))
        x_enc_1, x = self.encoders[1](x)
        x_enc_2, x = self.encoders[2](x)
        x_enc_3 = self.encoders[3](x)
        x = F.interpolate(x_enc_3, scale_factor=2)
        x = torch.cat((x, x_enc_2), dim=1)
        x = self.decoders[0](x)
        x = F.interpolate(x, scale_factor=2)
        x = torch.cat((x, x_enc_1), dim=1)
        x = self.decoders[1](x)
        x = F.interpolate(x, scale_factor=2)
        x = torch.cat((x, x_enc_0), dim=1)
        x = self.decoders[2](x)
        x = self.decoders[3](x)
        x = self.decoders[4](x)
        x = self.decoders[5](x)
        x = self.decoders[6](x)
        x = self.final_conv(x)
        disp_field = self.flow(x)
        deform_field = disp_field + id_map
        warped_source = self.bilinear(source, deform_field)
        self.warped = warped_source
        self.target = target
        self.disp_field = disp_field
        # BUGFIX: `if self.train:` tested the bound method nn.Module.train,
        # which is always truthy; use the module's `training` flag instead.
        if self.training:
            self.print_count += 1
        return warped_source, deform_field, disp_field
    def get_extra_to_plot(self):
        """No extra images to plot for this model."""
        return None, None
    def check_if_update_lr(self):
        """This model never requests a learning-rate update."""
        return False, None
    def scale_reg_loss(self, sched='l2'):
        """Return the mean squared spatial gradient (l2 smoothness) of the displacement."""
        disp = self.disp_field
        fd = fdt.FD_torch(self.spacing*2)
        dfx = fd.dXc(disp[:, 0, ...])
        dfy = fd.dYc(disp[:, 1, ...])
        dfz = fd.dZc(disp[:, 2, ...])
        l2 = dfx**2 + dfy**2 + dfz**2
        reg = l2.mean()
        return reg
    def get_sim_loss(self):
        """Return the similarity loss between warped source and target, averaged over the batch."""
        sim_loss = self.loss_fn.get_loss(self.warped, self.target)
        sim_loss = sim_loss / self.warped.shape[0]
        return sim_loss
    def weights_init(self):
        """Xavier-initialize all conv weights and zero all conv biases."""
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                if not m.weight is None:
                    nn.init.xavier_normal_(m.weight.data)
                if not m.bias is None:
                    m.bias.data.zero_()
    def get_loss(self):
        """Return the total training loss: similarity + reg_factor * smoothness."""
        reg_factor = self.initial_reg_factor
        sim_loss = self.get_sim_loss()
        reg_loss = self.scale_reg_loss()
        if self.print_count % 10 == 0:
            print('current sim loss is{}, current_reg_loss is {}, and reg_factor is {} '.format(sim_loss.item(),
                                                                                                reg_loss.item(),
                                                                                                reg_factor))
        return sim_loss + reg_factor*reg_loss
class AppearanceCVPR2019(nn.Module):
    """
    Appearance-transform network of the CVPR 2019 "brainstorm" augmentation
    framework: a U-Net that predicts an additive intensity change (delta)
    mapping a source (atlas) volume onto a target, trained with a similarity
    loss plus a contour-masked total-variation regularizer on delta.

    :param img_sz: volume size. e.g. (256, 256, 256)
    :param opt: ParameterDict with the task settings
    """
    def __init__(self, img_sz, opt=None):
        super(AppearanceCVPR2019, self).__init__()
        self.is_train = opt['tsk_set'][('train',False,'if is in train mode')]
        opt_voxelmorph = opt['tsk_set']['reg']['aug_appear_net']
        self.initial_reg_factor = opt_voxelmorph[('initial_reg_factor', 1., 'initial regularization factor')]
        self.sim_factor = opt_voxelmorph[('sim_factor', 1., 'initial regularization factor')]
        enc_filters = [16, 32, 32, 32, 32, 32]
        dec_filters = [64, 64, 32, 32, 32, 16, 16]
        self.enc_filter = enc_filters
        self.dec_filter = dec_filters
        input_channel = 2   # source and target are concatenated on the channel axis
        output_channel = 3
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.img_sz = img_sz
        self.spacing = 1. / (np.array(img_sz) - 1)
        self.loss_fn = None  # set later via set_loss_fn
        self.epoch = -1
        self.print_count = 0
        self.encoders = nn.ModuleList()
        self.decoders = nn.ModuleList()
        self.bilinear = Bilinear(zero_boundary=True)
        for i in range(len(enc_filters)):
            if i == 0:
                self.encoders.append(convBlock(input_channel, enc_filters[i], stride=1, max_pool=True, bias=True))
            if i > 0 and i < len(enc_filters) - 1:
                self.encoders.append(convBlock(enc_filters[i - 1], enc_filters[i], stride=1, max_pool=True, bias=True))
            if i == len(enc_filters) - 1:
                self.encoders.append(convBlock(enc_filters[i - 1], enc_filters[i], stride=1, max_pool=False, bias=True))
        self.decoders.append(convBlock(enc_filters[5] + enc_filters[4], dec_filters[0], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[0] + enc_filters[3], dec_filters[1], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[1] + enc_filters[2], dec_filters[2], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[2] + enc_filters[1], dec_filters[3], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[3] + enc_filters[0], dec_filters[4], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[4], dec_filters[5], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[5], dec_filters[6], stride=1, bias=True))
        self.final_conv = nn.Conv3d(dec_filters[6], output_channel, kernel_size=3, stride=1, padding=1, bias=True)
        self.color = nn.Conv3d(output_channel, 1, kernel_size=3, stride=1, padding=1, bias=True)
        self.mask = None     # lazily-built contour mask for the regularizer
        self.target = None
        self.reconst = None
        self.delta = None
    def set_loss_fn(self, loss_fn):
        """ set loss function"""
        self.loss_fn = loss_fn
    def set_cur_epoch(self, cur_epoch=-1):
        """ set current epoch"""
        self.epoch = cur_epoch
    def forward(self, source, target, source_mask=None, target_mask=None):
        """
        Predict the additive appearance change and reconstruct the target.

        :param source: moving image, B x 1 x X x Y x Z
        :param target: fixed image, B x 1 x X x Y x Z
        :param source_mask: unused, kept for interface compatibility
        :param target_mask: unused, kept for interface compatibility
        :return: (reconstructed image, delta, delta) -- delta is returned twice
            to mirror the three-output interface of the transform network
        """
        x_enc_0, x = self.encoders[0](torch.cat((source, target), dim=1))
        x_enc_1, x = self.encoders[1](x)
        x_enc_2, x = self.encoders[2](x)
        x_enc_3, x = self.encoders[3](x)
        x_enc_4, x = self.encoders[4](x)
        x_enc_5 = self.encoders[5](x)
        x = F.interpolate(x_enc_5, scale_factor=2)
        x = torch.cat((x, x_enc_4), dim=1)
        x = self.decoders[0](x)
        # use an explicit target size here: with odd input sizes, scale_factor=2
        # would not match the skip connection's spatial shape
        x = F.interpolate(x, size=x_enc_3.shape[2:])
        x = torch.cat((x, x_enc_3), dim=1)
        x = self.decoders[1](x)
        x = F.interpolate(x, scale_factor=2)
        x = torch.cat((x, x_enc_2), dim=1)
        x = self.decoders[2](x)
        x = F.interpolate(x, scale_factor=2)
        x = torch.cat((x, x_enc_1), dim=1)
        x = self.decoders[3](x)
        x = F.interpolate(x, scale_factor=2)
        x = torch.cat((x, x_enc_0), dim=1)
        x = self.decoders[4](x)
        x = self.decoders[5](x)
        x = self.decoders[6](x)
        x = self.final_conv(x)
        delta = self.color(x)
        reconst = source + delta
        self.delta = delta
        self.reconst = reconst
        self.target = target
        # BUGFIX: `if self.train:` tested the bound method nn.Module.train,
        # which is always truthy; use the module's `training` flag instead.
        if self.training:
            self.print_count += 1
        return reconst, delta, delta
    def get_extra_to_plot(self):
        """No extra images to plot for this model."""
        return None, None
    def check_if_update_lr(self):
        """This model never requests a learning-rate update."""
        return False, None
    def get_sim_loss(self):
        """Return the weighted similarity loss between reconstruction and target."""
        sim_loss = self.loss_fn.get_loss(self.reconst, self.target)
        sim_loss = sim_loss / self.reconst.shape[0]
        sim_loss = sim_loss * self.sim_factor
        return sim_loss
    def weights_init(self):
        """Xavier-initialize all conv weights and zero all conv biases."""
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                if not m.weight is None:
                    nn.init.xavier_normal_(m.weight.data)
                if not m.bias is None:
                    m.bias.data.zero_()
    def scale_reg_loss(self):
        """Return the total-variation of delta, masked away from anatomy contours."""
        def __compute_contour(seg_data):
            contours = pynd_segutils.seg2contour(seg_data, exclude_zero=True, contour_type='both')[None]
            contours[contours > 0] = 1
            return torch.Tensor(contours).cuda()
        if self.mask is None:
            import SimpleITK as sitk
            # NOTE(review): hard-coded atlas label path; this should come
            # from the task settings instead of being baked in here.
            atlas_path = '/playpen-raid/zyshen/data/oai_seg/atlas_label.nii.gz'
            seg = sitk.GetArrayFromImage(sitk.ReadImage(atlas_path))
            contour = __compute_contour(seg)
            self.mask = 1.0 - contour
        delta = self.delta
        fd = fdt.FD_torch(self.spacing * 2)
        # delta has a single channel, so all gradients are taken on channel 0
        dfx = fd.dXc(delta[:, 0, ...])
        dfy = fd.dYc(delta[:, 0, ...])
        dfz = fd.dZc(delta[:, 0, ...])
        dabs = dfx.abs() + dfy.abs() + dfz.abs()
        l2 = self.mask * dabs
        reg = l2.mean()
        return reg
    def get_loss(self):
        """Return the total training loss: similarity + reg_factor * masked TV."""
        reg_factor = self.initial_reg_factor
        sim_loss = self.get_sim_loss()
        reg_loss = self.scale_reg_loss()
        if self.print_count % 10 == 0:
            print('current sim loss is{}, current_reg_loss is {}, and reg_factor is {} '.format(sim_loss.item(),
                                                                                                reg_loss.item(),
                                                                                                reg_factor))
        return sim_loss + reg_factor*reg_loss
| 14,398 | 38.449315 | 120 | py |
easyreg | easyreg-master/easyreg/base_mermaid.py | from time import time
from .base_reg_model import RegModelBase
from .utils import *
import mermaid.finite_differences as fdt
from mermaid.utils import compute_warped_image_multiNC
import tools.image_rescale as ires
from .metrics import get_multi_metric
import SimpleITK as sitk
class MermaidBase(RegModelBase):
    """
    the base class of mermaid
    """
    def initialize(self, opt):
        """
        initialize env parameter in mermaid registration
        :param opt: ParameterDict, task setting
        :return:
        """
        RegModelBase.initialize(self, opt)
        self.affine_on = False
        self.nonp_on = False
        self.afimg_or_afparam = None
        self.save_extra_running_resolution_3d_img = opt['tsk_set'][('save_extra_running_resolution_3d_img', False, 'save extra image')]
        self.save_original_resol_by_type = opt['tsk_set'][(
            'save_original_resol_by_type', [True, True, True, True, True, True, True, True],
            'save_original_resol_by_type, save_s, save_t, save_w, save_phi, save_w_inv, save_phi_inv, save_disp, save_extra')]
        self.eval_metric_at_original_resol = opt['tsk_set'][
            ('eval_metric_at_original_resol', False, "evaluate the metric at original resolution")]
        self.external_eval = opt['tsk_set'][
            ('external_eval', '', "evaluate the metric using external metric but should follow easyreg format")]
        self.use_01 = True  # transformation maps are kept in [0,1] normalized coordinates
    def get_warped_label_map(self, label_map, phi, sched='nn', use_01=False):
        """
        get warped label map
        :param label_map: label map to warp
        :param phi: transformation map
        :param sched: 'nn' neareast neighbor
        :param use_01: indicate the input phi is in [0,1] coord; else the phi is assumed to be [-1,1]
        :return: the warped label map
        """
        if sched == 'nn':
            ###########TODO fix with new cuda interface, now comment for torch1 compatability
            # try:
            #     print(" the cuda nn interpolation is used")
            #     warped_label_map = get_nn_interpolation(label_map, phi)
            # except:
            #     warped_label_map = compute_warped_image_multiNC(label_map,phi,self.spacing,spline_order=0,zero_boundary=True,use_01_input=use_01)
            # spline_order=0 gives nearest-neighbor interpolation, which keeps labels discrete
            warped_label_map = compute_warped_image_multiNC(label_map, phi, self.spacing, spline_order=0,
                                                            zero_boundary=True, use_01_input=use_01)
            # check if here should be add assert
            assert abs(torch.sum(
                warped_label_map.detach() - warped_label_map.detach().round())) < 0.1, "nn interpolation is not precise"
        else:
            raise ValueError(" the label warpping method is not implemented")
        return warped_label_map
    def get_evaluation(self):
        """
        evaluate the transformation by compute overlap on label map and folding in transformation
        :return:
        """
        s1 = time()
        self.output, self.phi, self.afimg_or_afparam, _ = self.forward()
        self.inverse_phi = self.network.get_inverse_map()
        self.warped_label_map = None
        if self.l_moving is not None:
            self.warped_label_map = self.get_warped_label_map(self.l_moving, self.phi, use_01=self.use_01)
            if not self.eval_metric_at_original_resol:
                print("Not take IO cost into consideration, the testing time cost is {}".format(time() - s1))
                warped_label_map_np = self.warped_label_map.detach().cpu().numpy()
                l_target_np = self.l_target.detach().cpu().numpy()
            else:
                # re-warp the original-resolution labels with the (resampled) phi;
                # labels are reloaded from disk via the reference paths
                moving_l_reference_list = self.pair_path[2]
                target_l_reference_list = self.pair_path[3]
                num_s = len(target_l_reference_list)
                assert num_s==1, "when call evaluation in original resolution, the bach num should be set to 1"
                phi = (self.phi + 1) / 2. if not self.use_01 else self.phi
                _,_, warped_label_map_np,_ = ires.resample_warped_phi_and_image(None,None, moving_l_reference_list[0], target_l_reference_list[0], phi,self.spacing)
                warped_label_map_np = warped_label_map_np.detach().cpu().numpy()
                lt = [sitk.GetArrayFromImage(sitk.ReadImage(f)) for f in target_l_reference_list]
                sz = [num_s, 1] + list(lt[0].shape)
                l_target_np= np.stack(lt, axis=0)
                l_target_np = l_target_np.reshape(*sz).astype(np.float32)
            self.val_res_dic = get_multi_metric(warped_label_map_np, l_target_np, rm_bg=False)
        else:
            self.val_res_dic = {}
        self.jacobi_val = self.compute_jacobi_map((self.phi).detach().cpu().numpy(), crop_boundary=True,
                                                  use_01=self.use_01)
        print("current batch jacobi is {}".format(self.jacobi_val))
        self.eval_extern_metric()
    def eval_extern_metric(self):
        """Run an optional external evaluation (e.g. 'dirlab') on the forward/inverse maps."""
        if len(self.external_eval):
            from data_pre.reg_preprocess_example.dirlab_eval import eval_on_dirlab
            supported_metric = {"dirlab":eval_on_dirlab}
            phi = (self.phi + 1) / 2. if not self.use_01 else self.phi
            inverse_phi = (self.inverse_phi + 1) / 2. if not self.use_01 else self.inverse_phi
            supported_metric[self.external_eval](phi, inverse_phi, self.fname_list, self.pair_path,moving = self.moving, target=self.target, record_path= self.record_path)
    def compute_jacobi_map(self, map, crop_boundary=True, use_01=False, save_jacobi_map=False, appendix='3D'):
        """
        compute determinant jacobi on transformatiomm map, the coordinate should be canonical.
        :param map: the transformation map
        :param crop_boundary: if crop the boundary, then jacobi analysis would only analysis on cropped map
        :param use_01: infer the input map is in[0,1] else is in [-1,1]
        :return: the sum of absolute value of negative determinant jacobi, the num of negative determinant jacobi voxels
        """
        if type(map) == torch.Tensor:
            map = map.detach().cpu().numpy()
        span = 1.0 if use_01 else 2.0
        spacing = self.spacing * span  # the disp coorindate is [-1,1]
        fd = fdt.FD_np(spacing)
        dfx = fd.dXc(map[:, 0, ...])
        dfy = fd.dYc(map[:, 1, ...])
        dfz = fd.dZc(map[:, 2, ...])
        # NOTE(review): this treats the jacobian as diagonal (ignores cross terms);
        # it is an approximation of the full determinant
        jacobi_det = dfx * dfy * dfz
        if crop_boundary:
            crop_range = 5
            jacobi_det_croped = jacobi_det[:, crop_range:-crop_range, crop_range:-crop_range, crop_range:-crop_range]
            jacobi_abs_croped = - np.sum(jacobi_det_croped[jacobi_det_croped < 0.])  #
            jacobi_num_croped = np.sum(jacobi_det_croped < 0.)
            print("Cropped! the jacobi_value of fold points for current batch is {}".format(jacobi_abs_croped))
            print("Cropped! the number of fold points for current batch is {}".format(jacobi_num_croped))
        # self.temp_save_Jacobi_image(jacobi_det,map)
        jacobi_abs = - np.sum(jacobi_det[jacobi_det < 0.])  #
        jacobi_num = np.sum(jacobi_det < 0.)
        print("print folds for each channel {},{},{}".format(np.sum(dfx < 0.), np.sum(dfy < 0.), np.sum(dfz < 0.)))
        print("the jacobi_value of fold points for current batch is {}".format(jacobi_abs))
        print("the number of fold points for current batch is {}".format(jacobi_num))
        jacobi_abs_mean = jacobi_abs / map.shape[0]
        jacobi_num_mean = jacobi_num / map.shape[0]
        self.jacobi_map = None
        jacobi_abs_map = np.abs(jacobi_det)
        if save_jacobi_map:
            jacobi_neg_map = np.zeros_like(jacobi_det)
            jacobi_neg_map[jacobi_det < 0] = 1
            for i in range(jacobi_abs_map.shape[0]):
                jacobi_img = sitk.GetImageFromArray(jacobi_abs_map[i])
                jacobi_neg_img = sitk.GetImageFromArray(jacobi_neg_map[i])
                jacobi_img.SetSpacing(np.flipud(self.spacing))
                jacobi_neg_img.SetSpacing(np.flipud(self.spacing))
                jacobi_saving = os.path.join(self.record_path, appendix)
                os.makedirs(jacobi_saving, exist_ok=True)
                pth = os.path.join(jacobi_saving,
                                   self.fname_list[i] + "_iter_" + str(self.iter_count) + '_jacobi_img.nii')
                n_pth = os.path.join(jacobi_saving,
                                     self.fname_list[i] + "_iter_" + str(self.iter_count) + '_jacobi_neg_img.nii')
                sitk.WriteImage(jacobi_img, pth)
                sitk.WriteImage(jacobi_neg_img, n_pth)
        self.jacobi_map = jacobi_abs_map
        return jacobi_abs_mean, jacobi_num_mean
    def get_extra_to_plot(self):
        """
        extra image needs to be plot
        :return: image to plot, name
        """
        return None, None
    def save_fig(self, phase):
        """
        save 2d center slice from x,y, z axis, for moving, target, warped, l_moving (optional), l_target(optional), (l_warped)
        :param phase: train|val|test|debug
        :return:
        """
        from .visualize_registration_results import show_current_images
        visual_param = {}
        visual_param['visualize'] = False
        visual_param['save_fig'] = True
        visual_param['save_fig_path'] = self.record_path
        visual_param['save_fig_path_byname'] = os.path.join(self.record_path, 'byname')
        visual_param['save_fig_path_byiter'] = os.path.join(self.record_path, 'byiter')
        visual_param['save_fig_num'] = 4
        visual_param['pair_name'] = self.fname_list
        visual_param['iter'] = phase + "_iter_" + str(self.iter_count)
        disp = None
        extra_title = 'disp'
        extraImage, extraName = self.get_extra_to_plot()
        if self.save_extra_running_resolution_3d_img and extraImage is not None:
            self.save_extra_img(extraImage, extraName)
        if self.afimg_or_afparam is not None and len(self.afimg_or_afparam.shape) > 2 and not self.nonp_on:
            raise ValueError("displacement field is removed from current version")
            # disp = ((self.afimg_or_afparam[:,...]**2).sum(1))**0.5
        if self.nonp_on and self.afimg_or_afparam is not None:
            # for nonparametric models afimg_or_afparam holds the affine-warped image
            disp = self.afimg_or_afparam[:, 0, ...]
            extra_title = 'affine'
        if self.jacobi_map is not None and self.nonp_on:
            disp = self.jacobi_map
            extra_title = 'jacobi det'
        show_current_images(self.iter_count, iS=self.moving, iT=self.target, iW=self.output,
                            iSL=self.l_moving, iTL=self.l_target, iWL=self.warped_label_map,
                            vizImages=disp, vizName=extra_title, phiWarped=self.phi,
                            visual_param=visual_param, extraImages=extraImage, extraName=extraName)
    def _save_image_into_original_sz_with_given_reference(self, pair_path, phis, inverse_phis=None, use_01=False):
        """
        the images (moving, target, warped, transformation map, inverse transformation map world coord[0,1] ) are saved in record_path/original_sz
        :param pair_path: list, moving image path, target image path
        :param phis: transformation map BDXYZ
        :param inverse_phis: inverse transformation map BDXYZ
        :param use_01: indicate the transformation use [0,1] coord or [-1,1] coord
        :return:
        """
        save_original_resol_by_type = self.save_original_resol_by_type
        save_s, save_t, save_w, save_phi, save_w_inv, save_phi_inv, save_disp, save_extra_not_used_here = save_original_resol_by_type
        spacing = self.spacing
        moving_reference_list = pair_path[0]
        target_reference_list = pair_path[1]
        moving_l_reference_list = None
        target_l_reference_list = None
        if len(pair_path) == 4:
            moving_l_reference_list = pair_path[2]
            target_l_reference_list = pair_path[3]
        phis = (phis + 1) / 2. if not use_01 else phis
        saving_original_sz_path = os.path.join(self.record_path, 'original_sz')
        os.makedirs(saving_original_sz_path, exist_ok=True)
        for i in range(len(moving_reference_list)):
            moving_reference = moving_reference_list[i]
            target_reference = target_reference_list[i]
            moving_l_reference = moving_l_reference_list[i] if moving_l_reference_list else None
            target_l_reference = target_l_reference_list[i] if target_l_reference_list else None
            fname = self.fname_list[i]
            phi = phis[i:i+1]
            inverse_phi = inverse_phis[i:i+1] if inverse_phis is not None else None
            # new_phi, warped, warped_l, new_spacing = ires.resample_warped_phi_and_image(moving_reference, target_reference,
            #                                                   moving_l_reference,target_l_reference, phi, spacing)
            new_phi, warped, warped_l, new_spacing = ires.resample_warped_phi_and_image(moving_reference,target_reference,
                                                                                        moving_l_reference,
                                                                                        target_l_reference, phi,
                                                                                        spacing)
            if save_phi or save_disp:
                if save_phi:
                    ires.save_transfrom(new_phi, new_spacing, saving_original_sz_path, [fname])
                if save_disp:
                    cur_fname = fname + '_disp'
                    # displacement = phi - identity, both in [0,1] coordinates
                    id_map = gen_identity_map(warped.shape[2:], resize_factor=1., normalized=True).cuda()
                    id_map = (id_map[None] + 1) / 2.
                    disp = new_phi - id_map
                    ires.save_transform_with_reference(disp, new_spacing, [moving_reference], [target_reference],
                                                       path=saving_original_sz_path, fname_list=[cur_fname],
                                                       save_disp_into_itk_format=True)
                    del id_map, disp
            del new_phi, phi
            if save_w:
                cur_fname = fname + '_warped'
                ires.save_image_with_given_reference(warped, [target_reference], saving_original_sz_path, [cur_fname])
                if warped_l is not None:
                    cur_fname = fname + '_warped_l'
                    ires.save_image_with_given_reference(warped_l, [target_l_reference], saving_original_sz_path, [cur_fname])
            del warped
            if save_s:
                cur_fname = fname + '_moving'
                ires.save_image_with_given_reference(None, [moving_reference], saving_original_sz_path, [cur_fname])
                if moving_l_reference is not None:
                    cur_fname = fname + '_moving_l'
                    ires.save_image_with_given_reference(None, [moving_l_reference], saving_original_sz_path, [cur_fname])
            if save_t:
                cur_fname = fname + '_target'
                ires.save_image_with_given_reference(None, [target_reference], saving_original_sz_path, [cur_fname])
                if target_l_reference is not None:
                    cur_fname = fname + '_target_l'
                    ires.save_image_with_given_reference(None, [target_l_reference], saving_original_sz_path, [cur_fname])
            if inverse_phi is not None:
                inverse_phi = (inverse_phi + 1) / 2. if not use_01 else inverse_phi
                # for the inverse map, moving/target roles are swapped
                new_inv_phi, inv_warped, inv_warped_l, new_spacing = ires.resample_warped_phi_and_image(
                    target_reference,moving_reference, target_l_reference, moving_l_reference,inverse_phi, spacing)
                if save_phi_inv:
                    cur_fname = fname + '_inv'
                    ires.save_transfrom(new_inv_phi, new_spacing, saving_original_sz_path, [cur_fname])
                if save_w_inv:
                    cur_fname = fname + '_inv_warped'
                    ires.save_image_with_given_reference(inv_warped, [moving_reference], saving_original_sz_path,
                                                         [cur_fname])
                    if moving_l_reference is not None:
                        cur_fname = fname + '_inv_warped_l'
                        ires.save_image_with_given_reference(inv_warped_l, [moving_l_reference], saving_original_sz_path,
                                                             [cur_fname])
                if save_disp:
                    cur_fname = fname + '_inv_disp'
                    id_map = gen_identity_map(inv_warped.shape[2:], resize_factor=1., normalized=True).cuda()
                    id_map = (id_map[None] + 1) / 2.
                    inv_disp = new_inv_phi - id_map
                    ires.save_transform_with_reference(inv_disp, new_spacing, [target_reference], [moving_reference],
                                                       path=saving_original_sz_path, fname_list=[cur_fname],
                                                       save_disp_into_itk_format=True)
                    del id_map, inv_disp
                del new_inv_phi, inv_warped, inverse_phi
    def save_extra_img(self, img, title):
        """
        the propose of this function is for visualize the reg performance
        the extra image not include moving, target, warped, transformation map, which can refers to save save_fig_3D, save_deformation
        this function is for result analysis, for the saved image sz is equal to input_sz
        the physical information like origin, orientation is not saved, todo, include this information
        :param img: extra image, BCXYZ
        :param title: extra image name
        :return:
        """
        import SimpleITK as sitk
        import nibabel as nib
        num_img = img.shape[0]
        assert (num_img == len(self.fname_list))
        input_img_sz = self.input_img_sz if not self.save_original_resol_by_type[-1] else self.original_im_sz[
            0].cpu().numpy().tolist()  # [int(self.img_sz[i] * self.input_resize_factor[i]) for i in range(len(self.img_sz))]
        # img = get_resampled_image(img, self.spacing, desiredSize=[num_img, 1] + input_img_sz, spline_order=1)
        img_np = img.cpu().numpy()
        for i in range(num_img):
            if img_np.shape[1] == 1:
                # single-channel: write through SimpleITK with spacing attached
                img_to_save = img_np[i, 0]
                fpath = os.path.join(self.record_path,
                                     self.fname_list[i] + '_{:04d}'.format(self.cur_epoch + 1) + title + '.nii.gz')
                img_to_save = sitk.GetImageFromArray(img_to_save)
                img_to_save.SetSpacing(np.flipud(self.spacing))
                sitk.WriteImage(img_to_save, fpath)
            else:
                # multi-channel: write through nibabel with an identity affine
                multi_ch_img = nib.Nifti1Image(img_np[i], np.eye(4))
                fpath = os.path.join(self.record_path, self.fname_list[i] + '_{:04d}'.format(
                    self.cur_epoch + 1) + "_" + title + '.nii.gz')
                nib.save(multi_ch_img, fpath)
    def save_deformation(self):
        """
        save deformation in [0,1] coord, no physical spacing is included
        :return:
        """
        import nibabel as nib
        phi_np = self.phi.detach().cpu().numpy()
        phi_np = phi_np if self.use_01 else (phi_np + 1.) / 2.  # normalize the phi into 0, 1
        for i in range(phi_np.shape[0]):
            phi = nib.Nifti1Image(phi_np[i], np.eye(4))
            nib.save(phi, os.path.join(self.record_path, self.fname_list[i]) + '_phi.nii.gz')
        # if self.affine_on:
        #     # todo the affine param is assumed in -1, 1 phi coord, to be fixed into 0,1 coord
        #     affine_param = self.afimg_or_afparam
        #     if isinstance(affine_param, list):
        #         affine_param = self.afimg_or_afparam[0]
        #     affine_param = affine_param.detach().cpu().numpy()
        #     for i in range(affine_param.shape[0]):
        #         np.save(os.path.join(self.record_path, self.fname_list[i]) + '_affine_param.npy', affine_param[i])
| 20,184 | 54.30137 | 171 | py |
easyreg | easyreg-master/easyreg/voxel_morph.py | """
registration network described in voxelmorph
An experimental PyTorch implementation; for the official TensorFlow version please refer to https://github.com/voxelmorph/voxelmorph
An Unsupervised Learning Model for Deformable Medical Image Registration
Guha Balakrishnan, Amy Zhao, Mert R. Sabuncu, John Guttag, Adrian V. Dalca
CVPR 2018. eprint arXiv:1802.02604
Unsupervised Learning for Fast Probabilistic Diffeomorphic Registration
Adrian V. Dalca, Guha Balakrishnan, John Guttag, Mert R. Sabuncu
MICCAI 2018. eprint arXiv:1805.04605
"""
from .net_utils import gen_identity_map
import mermaid.finite_differences as fdt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .net_utils import Bilinear
from mermaid.libraries.modules import stn_nd
from .affine_net import AffineNetSym
from .utils import sigmoid_decay
class convBlock(nn.Module):
    """
    A convolutional block: conv -> (optional) BatchNorm -> (optional) nonlinear
    activation, with an optional residual (skip) connection.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                 bias=True, batchnorm=False, residual=False, nonlinear=nn.LeakyReLU(0.2)):
        """
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param kernel_size: convolution kernel size
        :param stride: convolution stride
        :param padding: convolution padding
        :param bias: if True, the conv layer has a bias term
        :param batchnorm: if True, apply BatchNorm3d after the conv
        :param residual: if True, add the block input to the output; this
            requires the input and output shapes to match (i.e.
            in_channels == out_channels and stride == 1)
        :param nonlinear: nonlinearity module, or None to disable
        """
        super(convBlock, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
        self.bn = nn.BatchNorm3d(out_channels) if batchnorm else None
        self.nonlinear = nonlinear
        self.residual = residual

    def forward(self, x):
        identity = x
        x = self.conv(x)
        if self.bn:
            x = self.bn(x)
        if self.nonlinear:
            x = self.nonlinear(x)
        if self.residual:
            # bug fix: the original did `x += x` (doubling the output) instead of
            # adding the block input as a residual connection
            x = x + identity
        return x
class VoxelMorphCVPR2018(nn.Module):
    """
    unet architecture for voxelmorph models presented in the CVPR 2018 paper.
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    :param img_sz: volume size. e.g. (80, 192, 192)
    :param opt: ParameterDict, the task settings

    Fixes over the previous revision:
    * the network-construction code that had drifted into ``set_cur_epoch``
      (where ``opt``/``enc_filters``/``input_channel`` are out of scope and
      would raise NameError) is restored into ``__init__``
    * ``forward`` now tests ``self.training`` (nn.Module's train/eval flag)
      instead of ``self.train``, which is a bound method and always truthy
    """

    def __init__(self, img_sz, opt=None):
        super(VoxelMorphCVPR2018, self).__init__()
        self.is_train = opt['tsk_set'][('train', False, 'if is in train mode')]
        opt_voxelmorph = opt['tsk_set']['reg']['vm_cvpr']
        self.load_trained_affine_net = opt_voxelmorph[('load_trained_affine_net', False, 'if true load_trained_affine_net; if false, the affine network is not initialized')]
        self.using_affine_init = opt_voxelmorph[("using_affine_init", False, "deploy affine network before the nonparametric network")]
        self.affine_init_path = opt_voxelmorph[('affine_init_path', '', "the path of pretrained affine model")]
        self.affine_refine_step = opt_voxelmorph[('affine_refine_step', 5, "the multi-step num in affine refinement")]
        self.initial_reg_factor = opt_voxelmorph[('initial_reg_factor', 1., 'initial regularization factor')]
        self.min_reg_factor = opt_voxelmorph[('min_reg_factor', 1., 'minimum of regularization factor')]
        enc_filters = [16, 32, 32, 32, 32]
        dec_filters = [32, 32, 32, 32, 32, 16, 16]
        self.enc_filter = enc_filters
        self.dec_filter = dec_filters
        input_channel = 2   # concatenated (affine-warped) source and target
        output_channel = 3  # one displacement channel per spatial dimension
        self.input_channel = 2
        self.output_channel = 3
        self.img_sz = img_sz
        self.spacing = 1. / (np.array(img_sz) - 1)
        self.loss_fn = None  # set later via set_loss_fn
        self.epoch = -1
        self.print_count = 0
        if self.using_affine_init:
            self.init_affine_net(opt)
            self.id_transform = None
        else:
            self.id_transform = gen_identity_map(self.img_sz, 1.0).cuda()
            print("Attention, the affine net is not used")
        self.encoders = nn.ModuleList()
        self.decoders = nn.ModuleList()
        self.bilinear = Bilinear(zero_boundary=True)
        for i in range(len(enc_filters)):
            if i == 0:
                self.encoders.append(convBlock(input_channel, enc_filters[i], stride=1, bias=True))
            else:
                self.encoders.append(convBlock(enc_filters[i - 1], enc_filters[i], stride=2, bias=True))
        # decoder path with skip connections from the encoder (see forward)
        self.decoders.append(convBlock(enc_filters[-1], dec_filters[0], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[0] + enc_filters[3], dec_filters[1], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[1] + enc_filters[2], dec_filters[2], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[2] + enc_filters[1], dec_filters[3], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[3], dec_filters[4], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[4] + enc_filters[0], dec_filters[5], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[5], dec_filters[6], stride=1, bias=True))
        # final head that predicts the displacement field
        self.flow = nn.Conv3d(dec_filters[-1], output_channel, kernel_size=3, stride=1, padding=1, bias=True)

    def set_cur_epoch(self, cur_epoch=-1):
        """ set current epoch"""
        self.epoch = cur_epoch

    def set_loss_fn(self, loss_fn):
        """ set loss function"""
        self.loss_fn = loss_fn

    def init_affine_net(self, opt):
        """create the affine sub-network and optionally load its pretrained weights"""
        self.affine_net = AffineNetSym(self.img_sz, opt)
        self.affine_net.compute_loss = False
        self.affine_net.epoch_activate_sym = 1e7  # todo to fix this unnatural setting
        self.affine_net.set_step(self.affine_refine_step)
        model_path = self.affine_init_path
        if self.load_trained_affine_net and self.is_train:
            checkpoint = torch.load(model_path, map_location='cpu')
            self.affine_net.load_state_dict(checkpoint['state_dict'])
            self.affine_net.cuda()
            print("Affine model is initialized!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        else:
            print(
                "The Affine model is added, but not initialized, this should only take place when a complete checkpoint (including affine model) will be loaded")
        self.affine_net.eval()

    def forward(self, source, target, source_mask=None, target_mask=None):
        """
        register source image to target image
        :param source: source image, BxCxXxYxZ
        :param target: target image, BxCxXxYxZ
        :param source_mask: unused here, kept for interface compatibility
        :param target_mask: unused here, kept for interface compatibility
        :return: warped source, deformation map, displacement field
        """
        if self.using_affine_init:
            with torch.no_grad():
                affine_img, affine_map, _ = self.affine_net(source, target)
        else:
            affine_map = self.id_transform.clone()
            affine_img = source
        x_enc_1 = self.encoders[0](torch.cat((affine_img, target), dim=1))
        x_enc_2 = self.encoders[1](x_enc_1)
        x_enc_3 = self.encoders[2](x_enc_2)
        x_enc_4 = self.encoders[3](x_enc_3)
        x_enc_5 = self.encoders[4](x_enc_4)
        x = self.decoders[0](x_enc_5)
        x = F.interpolate(x, scale_factor=2, mode='trilinear')
        x = torch.cat((x, x_enc_4), dim=1)
        x = self.decoders[1](x)
        x = F.interpolate(x, scale_factor=2, mode='trilinear')
        x = torch.cat((x, x_enc_3), dim=1)
        x = self.decoders[2](x)
        x = F.interpolate(x, scale_factor=2, mode='trilinear')
        x = torch.cat((x, x_enc_2), dim=1)
        x = self.decoders[3](x)
        x = self.decoders[4](x)
        x = F.interpolate(x, scale_factor=2, mode='trilinear')
        x = torch.cat((x, x_enc_1), dim=1)
        x = self.decoders[5](x)
        x = self.decoders[6](x)
        disp_field = self.flow(x)
        deform_field = disp_field + affine_map
        warped_source = self.bilinear(source, deform_field)
        # cache intermediates for the loss computation
        self.warped = warped_source
        self.target = target
        self.disp_field = disp_field
        self.source = source
        if self.training:  # bug fix: was `if self.train:` (a method, always truthy)
            self.print_count += 1
        return warped_source, deform_field, disp_field

    def get_extra_to_plot(self):
        """nothing extra to plot for this model"""
        return None, None

    def check_if_update_lr(self):
        """this model never asks the trainer to change the learning rate"""
        return False, None

    def scale_reg_loss(self, sched='l2'):
        """L2 norm of the spatial gradient of the displacement field (diffusion regularizer)"""
        disp = self.disp_field
        fd = fdt.FD_torch(self.spacing * 2)
        dfx = fd.dXc(disp[:, 0, ...])
        dfy = fd.dYc(disp[:, 1, ...])
        dfz = fd.dZc(disp[:, 2, ...])
        l2 = dfx ** 2 + dfy ** 2 + dfz ** 2
        reg = l2.mean()
        return reg

    def get_sim_loss(self):
        """similarity loss between the warped source and the target, averaged over the batch"""
        sim_loss = self.loss_fn.get_loss(self.warped, self.target)
        sim_loss = sim_loss / self.warped.shape[0]
        return sim_loss

    def weights_init(self):
        """xavier initialization for all conv layers"""
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                if m.weight is not None:
                    nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def get_reg_factor(self):
        """regularization factor decayed over epochs, floored at min_reg_factor"""
        factor = self.initial_reg_factor
        factor = float(max(sigmoid_decay(self.epoch, static=5, k=4) * factor, self.min_reg_factor))
        return factor

    def get_loss(self):
        """overall loss: similarity + decayed-factor * regularization"""
        reg_factor = self.get_reg_factor()
        sim_loss = self.get_sim_loss()
        reg_loss = self.scale_reg_loss()
        if self.print_count % 10 == 0:
            print('current sim loss is{}, current_reg_loss is {}, and reg_factor is {} '.format(sim_loss.item(),
                                                                                                reg_loss.item(),
                                                                                                reg_factor))
        return sim_loss + reg_factor * reg_loss

    def get_inverse_map(self, use_01=False):
        """numerically approximate the inverse map by registering target to source"""
        # TODO not test yet
        print("VoxelMorph approach doesn't support analytical computation of inverse map")
        print("Instead, we compute it's numerical approximation")
        _, inverse_map, _ = self.forward(self.target, self.source)
        return inverse_map
class VoxelMorphMICCAI2019(nn.Module):
    """
    unet architecture for voxelmorph models presented in the MICCAI 2019 paper.
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    :param img_sz: volume size. e.g. (80, 192, 192)
    :param opt: ParameterDict, the task settings

    Bug fix over the previous revision: ``forward`` now checks ``self.training``
    (nn.Module's train/eval flag) instead of ``self.train``, which is a bound
    method and therefore always truthy.
    """

    def __init__(self, img_sz, opt=None):
        super(VoxelMorphMICCAI2019, self).__init__()
        self.is_train = opt['tsk_set'][('train', False, 'if is in train mode')]
        opt_voxelmorph = opt['tsk_set']['reg']['vm_miccai']
        self.load_trained_affine_net = opt_voxelmorph[('load_trained_affine_net', False,
                                                       'if true load_trained_affine_net; if false, the affine network is not initialized')]
        self.affine_refine_step = opt_voxelmorph[('affine_refine_step', 5, "the multi-step num in affine refinement")]
        self.using_affine_init = opt_voxelmorph[("using_affine_init", False, "deploy affine network before the nonparametric network")]
        self.affine_init_path = opt_voxelmorph[('affine_init_path', '', "the path of pretrained affine model")]
        enc_filters = [16, 32, 32, 32, 32]
        dec_filters = [32, 32, 32, 32, 16]
        self.enc_filter = enc_filters
        self.dec_filter = dec_filters
        input_channel = 2   # concatenated (affine-warped) source and target
        output_channel = 3  # one flow channel per spatial dimension
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.img_sz = img_sz
        self.low_res_img_sz = [int(x / 2) for x in img_sz]
        self.spacing = 1. / (np.array(img_sz) - 1)
        self.int_steps = 7  # number of squaring steps to integrate the stationary velocity field
        self.image_sigma = opt_voxelmorph[('image_sigma', 0.02, 'image_sigma')]
        self.prior_lambda = opt_voxelmorph[('lambda_factor_in_vmr', 50, 'lambda_factor_in_vmr')]
        self.prior_lambda_mean = opt_voxelmorph[('lambda_mean_factor_in_vmr', 50, 'lambda_mean_factor_in_vmr')]
        self.flow_vol_shape = self.low_res_img_sz
        # degree matrix of the voxel-adjacency graph, used by the KL sigma term
        self.D = self._degree_matrix(self.flow_vol_shape)
        self.D = self.D.cuda()
        self.loss_fn = None
        if self.using_affine_init:
            self.init_affine_net(opt)
            self.id_transform = None
        else:
            self.id_transform = gen_identity_map(self.img_sz, 1.0).cuda()
            self.id_transform = self.id_transform.view([1] + list(self.id_transform.shape))
            print("Attention, the affine net is not used")
        # non-normalized low-res identity map, to be compatible with the mesh setting in voxelmorph
        self.low_res_id_transform = gen_identity_map(self.img_sz, 0.5, normalized=False).cuda()
        self.encoders = nn.ModuleList()
        self.decoders = nn.ModuleList()
        self.bilinear = stn_nd.STN_ND_BCXYZ(np.array([1., 1., 1.]), zero_boundary=True)
        self.bilinear_img = Bilinear(zero_boundary=True)
        for i in range(len(enc_filters)):
            if i == 0:
                self.encoders.append(convBlock(input_channel, enc_filters[i], stride=1, bias=True))
            else:
                self.encoders.append(convBlock(enc_filters[i - 1], enc_filters[i], stride=2, bias=True))
        self.decoders.append(convBlock(enc_filters[-1], dec_filters[0], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[0] + enc_filters[3], dec_filters[1], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[1] + enc_filters[2], dec_filters[2], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[2] + enc_filters[1], dec_filters[3], stride=1, bias=True))
        self.decoders.append(convBlock(dec_filters[3], dec_filters[4], stride=1, bias=True))
        # two heads: mean and log-variance of the approximate posterior over the velocity field
        self.flow_mean = nn.Conv3d(dec_filters[-1], output_channel, kernel_size=3, stride=1, padding=1, bias=True)
        self.flow_sigma = nn.Conv3d(dec_filters[-1], output_channel, kernel_size=3, stride=1, padding=1, bias=True)
        self.flow_mean.weight.data.normal_(0., 1e-5)
        self.flow_sigma.weight.data.normal_(0., 1e-10)
        self.flow_sigma.bias.data = torch.Tensor([-10] * 3)
        self.print_count = 0

    def scale_map(self, map, spacing):
        """
        Scales the map to the [-1,1]^d format
        :param map: map in BxCxXxYxZ format
        :param spacing: spacing in XxYxZ format
        :return: returns the scaled map
        """
        sz = map.size()
        map_scaled = torch.zeros_like(map)
        ndim = len(spacing)
        # This is to compensate to get back to the [-1,1] mapping of the following form
        # id[d]*=2./(sz[d]-1)
        # id[d]-=1.
        for d in range(ndim):
            if sz[d + 2] > 1:
                map_scaled[:, d, ...] = map[:, d, ...] * (2. / (sz[d + 2] - 1.) / spacing[d])
            else:
                map_scaled[:, d, ...] = map[:, d, ...]
        return map_scaled

    def set_loss_fn(self, loss_fn):
        """no-op: this model computes its own VAE-style loss (recon + KL)"""
        pass

    def init_affine_net(self, opt):
        """create the affine sub-network and optionally load its pretrained weights"""
        self.affine_net = AffineNetSym(self.img_sz, opt)
        self.affine_net.compute_loss = False
        self.affine_net.epoch_activate_sym = 1e7  # todo to fix this unnatural setting
        self.affine_net.set_step(self.affine_refine_step)
        model_path = self.affine_init_path
        if self.load_trained_affine_net and self.is_train:
            checkpoint = torch.load(model_path, map_location='cpu')
            self.affine_net.load_state_dict(checkpoint['state_dict'])
            self.affine_net.cuda()
            print("Affine model is initialized!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        else:
            print(
                "The Affine model is added, but not initialized, this should only take place when a complete checkpoint (including affine model) will be loaded")
        self.affine_net.eval()

    def forward(self, source, target, source_mask=None, target_mask=None):
        """
        register source image to target image
        :param source: source image, BxCxXxYxZ
        :param target: target image, BxCxXxYxZ
        :param source_mask: unused here, kept for interface compatibility
        :param target_mask: unused here, kept for interface compatibility
        :return: warped source, deformation map, displacement field
        """
        self.__do_some_clean()
        if self.using_affine_init:
            with torch.no_grad():
                affine_img, affine_map, _ = self.affine_net(source, target)
        else:
            affine_map = self.id_transform.clone()
            affine_img = source
        x_enc_1 = self.encoders[0](torch.cat((affine_img, target), dim=1))
        x_enc_2 = self.encoders[1](x_enc_1)
        x_enc_3 = self.encoders[2](x_enc_2)
        x_enc_4 = self.encoders[3](x_enc_3)
        x_enc_5 = self.encoders[4](x_enc_4)
        x = self.decoders[0](x_enc_5)
        x = F.interpolate(x, scale_factor=2, mode='trilinear')
        x = torch.cat((x, x_enc_4), dim=1)
        x = self.decoders[1](x)
        x = F.interpolate(x, scale_factor=2, mode='trilinear')
        x = torch.cat((x, x_enc_3), dim=1)
        x = self.decoders[2](x)
        x = F.interpolate(x, scale_factor=2, mode='trilinear')
        x = torch.cat((x, x_enc_2), dim=1)
        x = self.decoders[3](x)
        x = self.decoders[4](x)
        flow_mean = self.flow_mean(x)
        log_sigma = self.flow_sigma(x)
        # reparameterization trick: sample a velocity field from N(mean, sigma^2)
        noise = torch.randn(flow_mean.shape).cuda()
        flow = flow_mean + torch.exp(log_sigma / 2.0) * noise
        # scaling and squaring: integrate the stationary velocity field
        for _ in range(self.int_steps):
            deform_field = flow + self.low_res_id_transform
            flow_1 = self.bilinear(flow, deform_field)
            flow = flow_1 + flow
        disp_field = F.interpolate(flow, scale_factor=2, mode='trilinear')
        disp_field = self.scale_map(disp_field, np.array([1, 1, 1]))
        deform_field = disp_field + affine_map
        warped_source = self.bilinear_img(source, deform_field)
        # cache intermediates for the loss computation
        self.afimg_or_afparam = disp_field
        self.res_flow_mean = flow  # flow_mean TODO in original code here is flow_mean, but it doesn't work
        self.res_log_sigma = log_sigma
        self.warped = warped_source
        self.target = target
        self.source = source
        if self.training:  # bug fix: was `if self.train:` (a method, always truthy)
            self.print_count += 1
        return warped_source, deform_field, disp_field

    def check_if_update_lr(self):
        """this model never asks the trainer to change the learning rate"""
        return False, None

    def get_extra_to_plot(self):
        """nothing extra to plot for this model"""
        return None, None

    def __do_some_clean(self):
        """drop references to the previous iteration's tensors"""
        self.afimg_or_afparam = None
        self.res_flow_mean = None
        self.res_log_sigma = None
        self.warped = None
        self.target = None
        self.source = None

    def scale_reg_loss(self, disp=None, sched='l2'):
        """regularization loss: the KL divergence term of the MICCAI 2019 model"""
        reg = self.kl_loss()
        return reg

    def get_sim_loss(self, warped=None, target=None):
        """similarity loss: Gaussian reconstruction term"""
        loss = self.recon_loss()
        return loss

    def _adj_filt(self, ndims):
        """
        compute an adjacency filter that, for each feature independently,
        has a '1' in the immediate neighbor, and 0 elsewehre.
        so for each filter, the filter has 2^ndims 1s.
        the filter is then setup such that feature i outputs only to feature i
        """
        # inner filter, that is 3x3x...
        filt_inner = np.zeros([3] * ndims)
        for j in range(ndims):
            o = [[1]] * ndims
            o[j] = [0, 2]
            filt_inner[np.ix_(*o)] = 1
        # full filter, that makes sure the inner filter is applied
        # ith feature to ith feature; layout is (out_ch, in_ch, w, h, d)
        filt = np.zeros([ndims, ndims] + [3] * ndims)
        for i in range(ndims):
            filt[i, i, ...] = filt_inner
        return filt

    def _degree_matrix(self, vol_shape):
        """number of immediate neighbors of each voxel, per channel (conv of ones with the adjacency filter)"""
        ndims = len(vol_shape)
        sz = [ndims, *vol_shape]
        conv_fn = F.conv3d
        z = torch.ones([1] + sz)
        filt_tf = torch.Tensor(self._adj_filt(ndims))
        strides = [1] * ndims
        return conv_fn(z, filt_tf, padding=1, stride=strides)

    def prec_loss(self, disp):
        """
        a more manual implementation of the precision matrix term
        mu * P * mu where P = D - A
        where D is the degree matrix and A is the adjacency matrix
        mu * P * mu = 0.5 * sum_i mu_i sum_j (mu_i - mu_j) = 0.5 * sum_i,j (mu_i - mu_j) ^ 2
        where j are neighbors of i
        Note: could probably do with a difference filter,
        but the edges would be complicated unless tensorflow allowed for edge copying
        """
        fd = fdt.FD_torch(np.array([1., 1., 1.]))
        dfx = fd.dXc(disp[:, 0, ...])
        dfy = fd.dYc(disp[:, 1, ...])
        dfz = fd.dZc(disp[:, 2, ...])
        l2 = dfx ** 2 + dfy ** 2 + dfz ** 2
        reg = l2.mean()
        return reg * 0.5

    def kl_loss(self):
        """
        KL loss
        y_pred is assumed to be D*2 channels: first D for mean, next D for logsigma
        D (number of dimensions) should be 1, 2 or 3
        y_true is only used to get the shape
        """
        ndims = 3
        flow_mean = self.res_flow_mean
        log_sigma = self.res_log_sigma
        # sigma terms (the degree matrix self.D is precomputed in __init__)
        sigma_term = self.prior_lambda * self.D * torch.exp(log_sigma) - log_sigma
        sigma_term = torch.mean(sigma_term)
        # precision terms
        # note needs 0.5 twice, one here (inside self.prec_loss), one below
        prec_term = self.prior_lambda_mean * self.prec_loss(flow_mean)  # this is the jacobi loss
        if self.print_count % 10 == 0:
            print("the loss of neg log_sigma is {}, the sigma term is {}, the loss of the prec term is {}".format((-log_sigma).mean().item(), sigma_term, prec_term))
        # combine terms; ndims because we averaged over dimensions as well
        return 0.5 * ndims * (sigma_term + prec_term)

    def recon_loss(self):
        """ reconstruction loss """
        y_pred = self.warped
        y_true = self.target
        return 1. / (self.image_sigma ** 2) * torch.mean((y_true - y_pred) ** 2)

    def get_loss(self):
        """overall loss: reconstruction + KL"""
        sim_loss = self.get_sim_loss()
        reg_loss = self.scale_reg_loss()
        return sim_loss + reg_loss

    def get_inverse_map(self, use_01=False):
        """numerically approximate the inverse map by registering target to source"""
        # TODO not test yet
        print("VoxelMorph approach doesn't support analytical computation of inverse map")
        print("Instead, we compute it's numerical approximation")
        _, inverse_map, _ = self.forward(self.target, self.source)
        return inverse_map

    def weights_init(self):
        """xavier initialization for all conv layers"""
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                if m.weight is not None:
                    nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
#
# def test():
# from mermaid.module_parameters import ParameterDict
# cuda = torch.device('cuda:0')
# test_cvpr = True
# opt = ParameterDict()
# if test_cvpr:
# # unet = UNet_light2(2,3).to(cuda)
# net = VoxelMorphCVPR2018([80, 192, 192],opt).to(cuda)
# else:
# net = VoxelMorphMICCAI2019([80,192,192],opt).to(cuda)
# print(net)
# with torch.enable_grad():
# input1 = torch.randn(1, 1, 80, 192, 192).to(cuda)
# input2 = torch.randn(1, 1, 80, 192, 192).to(cuda)
# disp_field, warped_input1, deform_field = net(input1, input2)
# loss = net.get_loss()
# print("The loss is {}".format(loss))
#
# pass
#
# if __name__ == '__main__':
# test()
| 25,457 | 40.194175 | 171 | py |
easyreg | easyreg-master/easyreg/mermaid_net.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .utils import *
from .affine_net import *
from .momentum_net import *
import mermaid.module_parameters as pars
import mermaid.model_factory as py_mf
import mermaid.utils as py_utils
from functools import partial
from mermaid.libraries.functions.stn_nd import STNFunction_ND_BCXYZ
class MermaidNet(nn.Module):
"""
this network is an end to end system for momentum generation and mermaid registration
include the following parts
    1. (optional) affine net: the affine network is used to pre-align the source and target image
    2. the momentum generation network: this network is a u-net like encoder decoder
    3. the mermaid part: a map-based registration model would be called from the Mermaid toolkit
In detail of implementation, we should take care of the memory issue, one possible solution is using low-resolution mapping and then upsampling the transformation map
1. affine network, this is a pretrained network, so only the forward model is used,
in current design, the input and output of this net is not downsampled
2. momentum generation net, this is a trainable network, but we would have a low-res factor to train it at a low-resolution
the input may still at original resolution (for high quality interpolation), but the size during the computation and of the output are determined by the low-res factor
    3. mermaid part, this is a non-parametric unit, which is called from mermaid, and the output transformation map should be upsampled to the
    full resolution size. All momentum based mermaid registration methods should be supported. (todo: support velocity methods)
so the input and the output of each part should be
1. affine: input: source, target, output: s_warped, affine_map
2. momentum: input: init_warped_source, target, output: low_res_mom
3. mermaid: input: s, low_res_mom, low_res_initial_map output: map, warped_source
pay attention in Mermaid toolkit, the image intensity and identity transformation coord are normalized into [0,1],
while in networks the intensity and identity transformation coord are normalized into [-1,1],
todo use the coordinate system consistent with mermaid [0,1]
"""
    def __init__(self, img_sz=None, opt=None):
        """
        Read all task settings from *opt*, then build the momentum network,
        the (optional) affine network and the mermaid registration unit.

        :param img_sz: spatial size of the input images (list of ints, e.g. [80, 192, 192])
        :param opt: ParameterDict, task settings
        """
        super(MermaidNet, self).__init__()
        opt_mermaid = opt['tsk_set']['reg']['mermaid_net']
        low_res_factor = opt['tsk_set']['reg'][('low_res_factor',1.,"factor of low-resolution map")]
        batch_sz = opt['tsk_set']['batch_sz']
        self.record_path = opt['tsk_set']['path'][('record_path',"","record path")]
        """record path of the task"""
        self.is_train = opt['tsk_set'][('train',False,'if is in train mode')]
        """if is in train mode"""
        self.epoch = 0
        """the current epoch"""
        self.using_physical_coord = opt_mermaid[('using_physical_coord',False,'use physical coordinate system')]
        """'use physical coordinate system"""
        self.loss_type = opt['tsk_set']['loss'][('type','lncc',"the similarity measure type, support list: 'l1','mse','ncc','lncc'")]
        """the similarity measure supported by the mermaid: 'ssd','ncc','ncc_positive','ncc_negative', 'lncc', 'omt'"""
        self.compute_inverse_map = opt['tsk_set']['reg'][('compute_inverse_map', False,"compute the inverse transformation map")]
        """compute the inverse transformation map"""
        self.mermaid_net_json_pth = opt_mermaid[('mermaid_net_json_pth','',"the path for mermaid settings json")]
        """the path for mermaid settings json"""
        self.sym_factor = opt_mermaid[('sym_factor',500,'factor on symmetric loss')]
        """factor on symmetric loss"""
        self.epoch_activate_sym = opt_mermaid[('epoch_activate_sym',-1,'epoch activate the symmetric loss')]
        """epoch activate the symmetric loss"""
        self.epoch_activate_multi_step = opt_mermaid[('epoch_activate_multi_step',-1,'epoch activate the multi-step')]
        """epoch activate the multi-step"""
        self.reset_lr_for_multi_step = opt_mermaid[('reset_lr_for_multi_step',False,'if True, reset learning rate when multi-step begins')]
        """if True, reset learning rate when multi-step begins"""
        self.lr_for_multi_step = opt_mermaid[('lr_for_multi_step',opt['tsk_set']['optim']['lr']/2,'if reset_lr_for_multi_step, reset learning rate when multi-step begins')]
        """if reset_lr_for_multi_step, reset learning rate when multi-step begins"""
        self.multi_step = opt_mermaid[('num_step',2,'compute multi-step loss')]
        """compute multi-step loss"""
        self.using_affine_init = opt_mermaid[('using_affine_init',True,'if ture, deploy an affine network before mermaid-net')]
        """if ture, deploy an affine network before mermaid-net"""
        self.load_trained_affine_net = opt_mermaid[('load_trained_affine_net',True,'if true load_trained_affine_net; if false, the affine network is not initialized')]
        """if true load_trained_affine_net; if false, the affine network is not initialized"""
        self.affine_init_path = opt_mermaid[('affine_init_path','',"the path of trained affined network")]
        """the path of trained affined network"""
        self.affine_resoltuion = opt_mermaid[('affine_resoltuion',[-1,-1,-1],"the image resolution input for affine")]
        self.affine_refine_step = opt_mermaid[('affine_refine_step', 5, "the multi-step num in affine refinement")]
        """the multi-step num in affine refinement"""
        self.optimize_momentum_network = opt_mermaid[('optimize_momentum_network',True,'if true, optimize the momentum network')]
        """if true optimize the momentum network"""
        self.epoch_list_fixed_momentum_network = opt_mermaid[('epoch_list_fixed_momentum_network',[-1],'list of epoch, fix the momentum network')]
        """list of epoch, fix the momentum network"""
        self.epoch_list_fixed_deep_smoother_network = opt_mermaid[('epoch_list_fixed_deep_smoother_network',[-1],'epoch_list_fixed_deep_smoother_network')]
        """epoch_list_fixed_deep_smoother_network"""
        self.clamp_momentum = opt_mermaid[('clamp_momentum',False,'clamp_momentum')]
        """if true, clamp_momentum"""
        self.clamp_thre =opt_mermaid[('clamp_thre',1.0,'clamp momentum into [-clamp_thre, clamp_thre]')]
        """clamp momentum into [-clamp_thre, clamp_thre]"""
        self.mask_input_when_compute_loss = opt_mermaid[
            ('mask_input_when_compute_loss', False, 'mask_input_when_compute_loss')]
        """ mask input when compute loss"""
        self.use_adaptive_smoother = False
        self.print_loss_every_n_iter = 10 if self.is_train else 1
        self.using_sym_on = True if self.is_train else False
        if self.clamp_momentum:
            print("Attention, the clamp momentum is on")
        ##### TODO the sigma also need to be set like sqrt(batch_sz) ##########
        # when the symmetric loss is on, the s->t and t->s pairs are stacked along
        # the batch axis, effectively doubling the batch size
        batch_sz = batch_sz if not self.using_sym_on else batch_sz*2
        self.img_sz = [batch_sz, 1] + img_sz
        self.affine_resoltuion = [batch_sz, 1]+ self.affine_resoltuion
        self.dim = len(img_sz)
        self.standard_spacing = 1. / (np.array(img_sz) - 1)
        """ here we define the standard spacing measures the image coord from 0 to 1"""
        spacing_to_refer = opt['dataset'][('spacing_to_refer',[1, 1, 1],'the physical spacing in numpy coordinate, only activate when using_physical_coord is true')]
        self.spacing = normalize_spacing(spacing_to_refer, img_sz) if self.using_physical_coord else 1. / (
                    np.array(img_sz) - 1)
        # NOTE(review): self.input_img_sz is not assigned anywhere in this __init__;
        # the next line would raise AttributeError when using_physical_coord is True -- confirm
        self.spacing = normalize_spacing(self.spacing, self.input_img_sz) if self.using_physical_coord else self.spacing
        self.spacing = np.array(self.spacing) if type(self.spacing) is not np.ndarray else self.spacing
        self.low_res_factor = low_res_factor
        self.momentum_net = MomentumNet(low_res_factor,opt_mermaid)
        if self.using_affine_init:
            self.init_affine_net(opt)
        else:
            print("Attention, the affine net is not used")
        self.mermaid_unit_st = None
        self.init_mermaid_env()
        self.print_count = 0
        self.print_every_epoch_flag = True
        self.n_batch = -1
        self.inverse_map = None
def load_pretrained_model(self, pretrained_model_path):
checkpoint = torch.load(pretrained_model_path, map_location="cpu")
self.load_state_dict(checkpoint["state_dict"])
print("load pretrained model from {}".format(pretrained_model_path))
def check_if_update_lr(self):
"""
check if the learning rate need to be updated, in mermaid net, it is implemented for adjusting the lr in the multi-step training
:return: if update the lr, return True and new lr, else return False and None
"""
if self.epoch == self.epoch_activate_multi_step and self.reset_lr_for_multi_step:
lr = self.lr_for_multi_step
self.reset_lr_for_multi_step = False
print("the lr is change into {} due to the activation of the multi-step".format(lr))
return True, lr
else:
return False, None
def init_affine_net(self,opt):
"""
initialize the affine network, if an affine_init_path is given , then load the affine model from the path.
:param opt: ParameterDict, task setting
:return:
"""
self.affine_net = AffineNetSym(self.img_sz[2:],opt)
self.affine_param = None
self.affine_net.compute_loss = False
self.affine_net.epoch_activate_sym = 1e7 # todo to fix this unatural setting
self.affine_net.set_step(self.affine_refine_step)
model_path = self.affine_init_path
if self.load_trained_affine_net and self.is_train:
checkpoint = torch.load(model_path, map_location='cpu')
self.affine_net.load_state_dict(checkpoint['state_dict'])
self.affine_net.cuda()
print("Affine model is initialized!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
else:
print("The Affine model is added, but not initialized, this should only take place when a complete checkpoint (including affine model) will be loaded")
self.affine_net.eval()
def set_cur_epoch(self,epoch=-1):
"""
set current epoch
:param epoch:
:return:
"""
if self.epoch !=epoch+1:
self.print_every_epoch_flag=True
self.epoch = epoch+1
def set_loss_fn(self, loss_fn):
"""
set loss function (disabled)
:param loss_fn:
:return:
"""
pass
def save_cur_mermaid_settings(self,params):
"""
save the mermaid settings into task record folder
:param params:
:return:
"""
if len(self.record_path):
saving_path = os.path.join(self.record_path,'nonp_setting.json')
params.write_JSON(saving_path, save_int=False)
params.write_JSON_comments(saving_path.replace('.json','_comment.json'))
    def init_mermaid_env(self):
        """
        setup the mermaid environment
        * saving the settings into record folder
        * initialize model from model, criterion and related variables
        """
        spacing = self.spacing
        params = pars.ParameterDict()
        params.load_JSON( self.mermaid_net_json_pth) #''../easyreg/cur_settings_svf.json')
        print(" The mermaid setting from {} included:".format(self.mermaid_net_json_pth))
        print(params)
        model_name = params['model']['registration_model']['type']
        use_map = params['model']['deformation']['use_map']
        compute_similarity_measure_at_low_res = params['model']['deformation'][
            ('compute_similarity_measure_at_low_res', False, 'to compute Sim at lower resolution')]
        # the similarity type chosen in the task settings overrides the json
        params['model']['registration_model']['similarity_measure']['type'] =self.loss_type
        params.print_settings_off()
        self.mermaid_low_res_factor = self.low_res_factor
        smoother_type = params['model']['registration_model']['forward_model']['smoother']['type']
        self.use_adaptive_smoother =smoother_type=='learned_multiGaussianCombination'
        lowResSize = None
        lowResSpacing = None
        ##
        # a factor of 1 means no downsampling; normalize it to None so the
        # branches below can test `is not None`
        if self.mermaid_low_res_factor == 1.0 or self.mermaid_low_res_factor == [1., 1., 1.]:
            self.mermaid_low_res_factor = None
            self.lowResSize = self.img_sz
            self.lowResSpacing = spacing
        ##
        if self.mermaid_low_res_factor is not None:
            lowResSize = get_res_size_from_size(self.img_sz, self.mermaid_low_res_factor)
            lowResSpacing = get_res_spacing_from_spacing(spacing, self.img_sz, lowResSize)
            self.lowResSize = lowResSize
            self.lowResSpacing = lowResSpacing
        if self.mermaid_low_res_factor is not None:
            # computes model at a lower resolution than the image similarity
            if compute_similarity_measure_at_low_res:
                mf = py_mf.ModelFactory(lowResSize, lowResSpacing, lowResSize, lowResSpacing)
            else:
                mf = py_mf.ModelFactory(self.img_sz, spacing, lowResSize, lowResSpacing)
        else:
            # computes model and similarity at the same resolution
            mf = py_mf.ModelFactory(self.img_sz, spacing, self.img_sz, spacing)
        model, criterion = mf.create_registration_model(model_name, params['model'], compute_inverse_map=self.compute_inverse_map)
        if use_map:
            # create the identity map [0,1]^d, since we will use a map-based implementation
            _id = py_utils.identity_map_multiN(self.img_sz, spacing)
            self.identityMap = torch.from_numpy(_id).cuda()
        if self.mermaid_low_res_factor is not None:
            # create a lower resolution map for the computations
            lowres_id = py_utils.identity_map_multiN(lowResSize, lowResSpacing)
            self.lowResIdentityMap = torch.from_numpy(lowres_id).cuda()
        # any -1 entry means "keep the original resolution for the affine network"
        resize_affine_input = all([sz != -1 for sz in self.affine_resoltuion[2:]])
        if resize_affine_input:
            self.affine_spacing = get_res_spacing_from_spacing(spacing, self.img_sz, self.affine_resoltuion)
            affine_id = py_utils.identity_map_multiN(self.affine_resoltuion, self.affine_spacing)
            self.affineIdentityMap = torch.from_numpy(affine_id).cuda()
        # NOTE(review): self.lowResIdentityMap is only created when
        # mermaid_low_res_factor is not None, yet it is referenced here
        # unconditionally -- presumably low_res_factor < 1 in practice; confirm
        self.lowRes_fn = partial(get_resampled_image, spacing=spacing, desiredSize=lowResSize, zero_boundary=False,identity_map=self.lowResIdentityMap)
        self.mermaid_unit_st = model.cuda()
        self.criterion = criterion
        self.mermaid_unit_st.associate_parameters_with_module()
        self.save_cur_mermaid_settings(params)
def get_loss(self):
"""
get the overall loss
:return:
"""
return self.overall_loss
    def __cal_sym_loss(self,rec_phiWarped):
        r"""
        compute the symmetric loss,
        :math: `loss_{sym} = \|(\varphi^{s t})^{-1} \circ(\varphi^{t s})^{-1}-i d\|_{2}^{2}`
        (raw docstring: the original non-raw string corrupted ``\varphi`` via the ``\v`` escape)
        :param rec_phiWarped: the transformation map, including two directions (s-t, t-s stacked in the batch dimension)
        :return: mean(`loss_{sym}`)
        """
        trans1 = STNFunction_ND_BCXYZ(self.spacing,zero_boundary=False)
        trans2 = STNFunction_ND_BCXYZ(self.spacing,zero_boundary=False)
        # first half of the batch holds the s->t map, the second half the t->s map
        st_map = rec_phiWarped[:self.n_batch]
        ts_map = rec_phiWarped[self.n_batch:]
        identity_map = self.identityMap[0:self.n_batch]
        # composing the two maps should recover the identity if registration is symmetric
        trans_st = trans1(identity_map,st_map)
        trans_st_ts = trans2(trans_st,ts_map)
        return torch.mean((identity_map- trans_st_ts)**2)
    def do_criterion_cal(self, ISource, ITarget,cur_epoch=-1):
        """
        get the loss according to mermaid criterion
        :param ISource: Source image with full size, intensity in [-1,1]
        :param ITarget: Target image with full size, intensity in [-1,1]
        :param cur_epoch: current epoch (printing is suppressed when negative)
        :return: overall loss (include sim, reg and sym(optional)), similarity loss and the regularization loss
        """
        # todo the image is not necessary be normalized to [0,1] here, just keep -1,1 would be fine
        ISource = (ISource + 1.) / 2.
        ITarget = (ITarget + 1.) / 2.
        low_moving = self.low_moving
        if self.mask_input_when_compute_loss and self.moving_mask is not None and self.target_mask is not None:
            # restrict the loss computation to the masked region
            ISource = ISource*self.moving_mask
            ITarget = ITarget*self.target_mask
            low_moving = low_moving*self.low_moving_mask
        loss_overall_energy, sim_energy, reg_energy = self.criterion(self.identityMap, self.rec_phiWarped, ISource,
                                                                     ITarget, low_moving,
                                                                     self.mermaid_unit_st.get_variables_to_transfer_to_loss_function(),
                                                                     None)
        if not self.using_sym_on:
            if self.print_count % self.print_loss_every_n_iter == 0 and cur_epoch>=0:
                print('the loss_over_all:{} sim_energy:{}, reg_energy:{}'.format(loss_overall_energy.item(),
                                                                                 sim_energy.item(),
                                                                                 reg_energy.item()))
        else:
            # symmetric mode: add the forward/backward consistency penalty
            sym_energy = self.__cal_sym_loss(self.rec_phiWarped)
            sym_factor = self.sym_factor # min(sigmoid_explode(cur_epoch,static=1, k=8)*0.01*gl_sym_factor,1.*gl_sym_factor) #static=5, k=4)*0.01,1) static=10, k=10)*0.01
            loss_overall_energy = loss_overall_energy + sym_factor * sym_energy
            if self.print_count % self.print_loss_every_n_iter == 0 and cur_epoch >= 0:
                print('the loss_over_all:{} sim_energy:{},sym_factor: {} sym_energy: {} reg_energy:{}'.format(
                    loss_overall_energy.item(),
                    sim_energy.item(),
                    sym_factor,
                    sym_energy.item(),
                    reg_energy.item()))
        if self.step_loss is not None:
            # multi-step mode: accumulate the loss over the intermediate steps
            self.step_loss += loss_overall_energy
            loss_overall_energy = self.step_loss
        if self.cur_step<self.step-1:
            # intermediate steps must not advance the print counter; this
            # decrement cancels the unconditional increment below
            self.print_count -= 1
        self.print_count += 1
        return loss_overall_energy, sim_energy, reg_energy
def set_mermaid_param(self,mermaid_unit,criterion, s, t, m,s_full=None):
"""
set variables need to be passed into mermaid model and mermaid criterion
:param mermaid_unit: model created by mermaid
:param criterion: criterion create by mermaid
:param s: source image (can be downsampled)
:param t: target image (can be downsampled)
:param m: momentum (can be downsampled)
:param s_full: full resolution image ( to get better sampling results)
:return:
"""
mermaid_unit.set_dictionary_to_pass_to_integrator({'I0': s, 'I1': t,'I0_full':s_full})
criterion.set_dictionary_to_pass_to_smoother({'I0': s, 'I1': t,'I0_full':s_full})
mermaid_unit.m = m
criterion.m = m
def __freeze_param(self,params):
"""
freeze the parameters during training
:param params: the parameters to be trained
:return:
"""
for param in params:
param.requires_grad = False
def __active_param(self,params):
"""
active the frozen parameters
:param params: the parameters to be activated
:return:
"""
for param in params:
param.requires_grad = True
def get_inverse_map(self,use_01=False):
"""
get the inverse map
:param use_01: if ture, get the map in [0,1] else in [-1,1]
:return: the inverse map
"""
if use_01 or self.inverse_map is None:
return self.inverse_map
else:
return self.inverse_map*2-1
    def init_mermaid_param(self,s):
        """
        initialize the mermaid parameters: (de)activate the deep smoother for the
        current epoch and, when a low-res factor is used, downsample the input
        :param s: source image taken as adaptive smoother input
        :return: the downsampled image, or None when no low-res factor is set
        """
        if self.use_adaptive_smoother:
            if self.epoch in self.epoch_list_fixed_deep_smoother_network:
                #self.mermaid_unit_st.smoother._enable_force_nn_gradients_to_zero_hooks()
                self.__freeze_param(self.mermaid_unit_st.smoother.ws.parameters())
            else:
                self.__active_param(self.mermaid_unit_st.smoother.ws.parameters())
        if self.mermaid_low_res_factor is not None:
            if s.shape[0]==self.lowResIdentityMap.shape[0]:
                low_s= get_resampled_image(s, self.spacing, self.lowResSize, 1, zero_boundary=True, identity_map=self.lowResIdentityMap)
            else:
                # batch size differs from the cached identity map (e.g. the
                # symmetric mode doubles the batch); resample with a matching size
                n_batch = s.shape[0]
                lowResSize = self.lowResSize.copy()
                lowResSize[0] = n_batch
                low_s = get_resampled_image(s, self.spacing, lowResSize, 1, zero_boundary=True,
                                            identity_map=self.lowResIdentityMap[0:n_batch])
            return low_s
        else:
            return None
    def do_mermaid_reg(self,mermaid_unit,criterion, s, t, m, phi,inv_map=None):
        """
        perform the mermaid registration unit
        :param mermaid_unit: registration model created by mermaid
        :param criterion: criterion created by mermaid
        :param s: source image
        :param t: target image
        :param m: initial momentum
        :param phi: initial deformation field
        :param inv_map: inverse map (only used when self.compute_inverse_map is set)
        :return: warped image, transformation map
        """
        if self.mermaid_low_res_factor is not None:
            low_s, low_t = self.low_moving, self.low_target
            self.set_mermaid_param(mermaid_unit,criterion,low_s, low_t, m,s)
            if not self.compute_inverse_map:
                maps = mermaid_unit(self.lowRes_fn(phi), low_s, variables_from_optimizer={'epoch':self.epoch})
            else:
                maps, inverse_maps = mermaid_unit(self.lowRes_fn(phi), low_s,phi_inv=self.lowRes_fn(inv_map), variables_from_optimizer={'epoch':self.epoch})
            desiredSz = self.img_sz
            # upsample the low-res map back to the full resolution before warping
            rec_phiWarped = get_resampled_image(maps, self.lowResSpacing, desiredSz, 1,zero_boundary=False,identity_map=self.identityMap)
            if self.compute_inverse_map:
                self.inverse_map = get_resampled_image(inverse_maps, self.lowResSpacing, desiredSz, 1,
                                                       zero_boundary=False,identity_map=self.identityMap)
        else:
            # model and similarity run at the same (full) resolution
            self.set_mermaid_param(mermaid_unit,criterion,s, t, m,s)
            if not self.compute_inverse_map:
                maps = mermaid_unit(phi, s, variables_from_optimizer={'epoch':self.epoch})
            else:
                maps, self.inverse_map = mermaid_unit(phi, s,phi_inv = inv_map, variables_from_optimizer = {'epoch': self.epoch})
            rec_phiWarped = maps
        rec_IWarped = py_utils.compute_warped_image_multiNC(s, rec_phiWarped, self.spacing, 1,zero_boundary=True)
        self.rec_phiWarped = rec_phiWarped
        return rec_IWarped, rec_phiWarped
def __get_momentum(self):
momentum = self.mermaid_unit_st.m[:self.n_batch]
return momentum
    def __get_adaptive_smoother_map(self):
        r"""
        get the adaptive smoother weight map from the spatially-variant regularizer model
        supported weighting types: 'sqrt_w_K_sqrt_w' and 'w_K_w'
        for weighting_type == 'w_K_w'
        :math:`\sigma^{2}(x)=\sum_{i=0}^{N-1} w^2_{i}(x) \sigma_{i}^{2}`
        for weighting_type == 'sqrt_w_K_sqrt_w'
        :math:`\sigma^{2}(x)=\sum_{i=0}^{N-1} w_{i}(x) \sigma_{i}^{2}`
        :return: adaptive smoother weight map :math:`\sigma`
        """
        adaptive_smoother_map = self.mermaid_unit_st.smoother.get_deep_smoother_weights()
        weighting_type = self.mermaid_unit_st.smoother.weighting_type
        if not self.using_sym_on:
            adaptive_smoother_map = adaptive_smoother_map.detach()
        else:
            # symmetric mode doubles the batch; keep only the first half
            adaptive_smoother_map = adaptive_smoother_map[:self.n_batch].detach()
        gaussian_weights = self.mermaid_unit_st.smoother.get_gaussian_weights()
        gaussian_weights = gaussian_weights.detach()
        print(" the current global gaussian weight is {}".format(gaussian_weights))
        gaussian_stds = self.mermaid_unit_st.smoother.get_gaussian_stds()
        gaussian_stds = gaussian_stds.detach()
        print(" the current global gaussian stds is {}".format(gaussian_stds))
        # NOTE(review): `dim` is not defined in this method — presumably a
        # module-level constant (likely 3); confirm against the module header
        view_sz = [1] + [len(gaussian_stds)] + [1] * dim
        gaussian_stds = gaussian_stds.view(*view_sz)
        if weighting_type == 'w_K_w':
            adaptive_smoother_map = adaptive_smoother_map**2 # todo add if judgement, this is true only when we use w_K_W
        # per-voxel std: sqrt of the weighted sum of the squared gaussian stds
        smoother_map = adaptive_smoother_map*(gaussian_stds**2)
        smoother_map = torch.sqrt(torch.sum(smoother_map,1,keepdim=True))
        #_,smoother_map = torch.max(adaptive_smoother_map.detach(),dim=1,keepdim=True)
        self._display_stats(smoother_map.float(),'statistic for max_smoother map')
        return smoother_map
def _display_stats(self, Ia, iname):
"""
statistic analysis on variable, print min, mean, max and std
:param Ia: the input variable
:param iname: variable name
:return:
"""
Ia_min = Ia.min().detach().cpu().numpy()
Ia_max = Ia.max().detach().cpu().numpy()
Ia_mean = Ia.mean().detach().cpu().numpy()
Ia_std = Ia.std().detach().cpu().numpy()
print('{}:after: [{:.2f},{:.2f},{:.2f}]({:.2f})'.format(iname, Ia_min,Ia_mean,Ia_max,Ia_std))
def get_extra_to_plot(self):
"""
plot extra image, i.e. the initial weight map of rdmm model
:return: extra image, name
"""
if self.use_adaptive_smoother:
# the last step adaptive smoother is returned, todo add the first stage smoother
return self.__get_adaptive_smoother_map(), 'Inital_weight'
else:
return self.__get_momentum(), "Momentum"
def __transfer_return_var(self,rec_IWarped,rec_phiWarped,affine_img):
"""
normalize the image into [0,1] while map into [-1,1]
:param rec_IWarped: warped image
:param rec_phiWarped: transformation map
:param affine_img: affine image
:return:
"""
return (rec_IWarped).detach(), (rec_phiWarped * 2. - 1.).detach(), ((affine_img+1.)/2.).detach()
    def affine_forward(self,moving, target=None):
        """
        compute the affine pre-registration (when enabled)
        :param moving: moving image with intensity [-1,1]
        :param target: target image with intensity [-1,1]
        :return: affine-warped image and the affine map in [0,1]
        """
        if self.using_affine_init:
            with torch.no_grad():
                toaffine_moving, toaffine_target = moving, target
                resize_affine_input = all([sz != -1 for sz in self.affine_resoltuion[2:]])
                if resize_affine_input:
                    # run the affine network on a downsampled pair
                    toaffine_moving = get_resampled_image(toaffine_moving, self.spacing, self.affine_resoltuion, identity_map=self.affineIdentityMap)
                    toaffine_target = get_resampled_image(toaffine_target, self.spacing, self.affine_resoltuion, identity_map=self.affineIdentityMap)
                affine_img, affine_map, affine_param = self.affine_net(toaffine_moving, toaffine_target)
                self.affine_param = affine_param
                # the affine net outputs maps in [-1,1]; convert to [0,1]
                affine_map = (affine_map + 1) / 2.
                inverse_map = None
                if self.compute_inverse_map:
                    inverse_map = self.affine_net.get_inverse_map(use_01=True)
                if resize_affine_input:
                    # re-warp the full-resolution moving image with the affine map
                    affine_img = py_utils.compute_warped_image_multiNC(moving, affine_map, self.spacing, 1,
                                                                       zero_boundary=True, use_01_input=True)
                if self.using_physical_coord:
                    for i in range(self.dim):
                        affine_map[:, i] = affine_map[:, i] * self.spacing[i] / self.standard_spacing[i]
                if self.compute_inverse_map:
                    for i in range(self.dim):
                        inverse_map[:, i] = inverse_map[:, i] * self.spacing[i] / self.standard_spacing[i]
                self.inverse_map = inverse_map
        else:
            # no affine initialization: start from the identity map
            num_b = moving.shape[0]
            affine_map = self.identityMap[:num_b].clone()
            if self.compute_inverse_map:
                self.inverse_map = self.identityMap[:num_b].clone()
            affine_img = moving
        return affine_img, affine_map
    def mutli_step_forward(self, moving,target=None,moving_mask=None, target_mask=None):
        """
        mutli-step mermaid registration
        :param moving: moving image with intensity [-1,1]
        :param target: target image with intensity [-1,1]
        :param moving_mask: optional moving-image mask, used when masking the loss
        :param target_mask: optional target-image mask, used when masking the loss
        :return: warped image with intensity[0,1], transformation map [-1,1], affined image [0,1] (if no affine trans used, return moving)
        """
        self.step_loss = None
        affine_img, affine_map = self.affine_forward(moving,target)
        warped_img = affine_img
        init_map = affine_map
        rec_IWarped = None
        rec_phiWarped = None
        moving_n = (moving + 1) / 2. # [-1,1] ->[0,1]
        target_n = (target + 1) / 2. # [-1,1] ->[0,1]
        self.low_moving = self.init_mermaid_param(moving_n)
        self.low_target = self.init_mermaid_param(target_n)
        self.moving_mask, self.target_mask = moving_mask, target_mask
        self.low_moving_mask, self.low_target_mask = None, None
        if self.mask_input_when_compute_loss and moving_mask is not None and target_mask is not None:
            self.low_moving_mask = self.init_mermaid_param(moving_mask)
            self.low_target_mask = self.init_mermaid_param(target_mask)
        for i in range(self.step):
            self.cur_step = i
            record_is_grad_enabled = torch.is_grad_enabled()
            # optionally run the momentum network without gradients this epoch
            if not self.optimize_momentum_network or self.epoch in self.epoch_list_fixed_momentum_network:
                torch.set_grad_enabled(False)
            if self.print_every_epoch_flag:
                if self.epoch in self.epoch_list_fixed_momentum_network:
                    print("In this epoch, the momentum network is fixed")
                if self.epoch in self.epoch_list_fixed_deep_smoother_network:
                    print("In this epoch, the deep regularizer network is fixed")
                self.print_every_epoch_flag = False
            input = torch.cat((warped_img, target), 1)
            m = self.momentum_net(input)
            if self.clamp_momentum:
                m=m.clamp(max=self.clamp_thre,min=-self.clamp_thre)
            torch.set_grad_enabled(record_is_grad_enabled)
            rec_IWarped, rec_phiWarped = self.do_mermaid_reg(self.mermaid_unit_st,self.criterion,moving_n, target_n, m, init_map, self.inverse_map)
            # feed the current step's output into the next step
            warped_img = rec_IWarped * 2 - 1 # [0,1] -> [-1,1]
            init_map = rec_phiWarped # [0,1]
            self.rec_phiWarped = rec_phiWarped
            if i < self.step - 1:
                # accumulate intermediate-step losses (read back inside do_criterion_cal)
                self.step_loss, _, _ = self.do_criterion_cal(moving, target, self.epoch)
        if self.using_physical_coord:
            # rescale the map from normalized into physical coordinates
            rec_phiWarped_tmp = rec_phiWarped.detach().clone()
            for i in range(self.dim):
                rec_phiWarped_tmp[:, i] = rec_phiWarped[:, i] * self.standard_spacing[i] / self.spacing[i]
            rec_phiWarped = rec_phiWarped_tmp
        self.overall_loss,_,_= self.do_criterion_cal(moving, target, cur_epoch=self.epoch)
        return self.__transfer_return_var(rec_IWarped, rec_phiWarped, affine_img)
def mutli_step_sym_forward(self,moving, target=None,moving_mask=None, target_mask=None):
"""
symmetric multi-step mermaid registration
the "source" is concatenated by source and target, the "target" is concatenated by target and source
then the multi-step forward is called
:param moving: moving image with intensity [-1,1]
:param target: target image with intensity [-1,1]
:return: warped image with intensity[0,1], transformation map [-1,1], affined image [0,1] (if no affine trans used, return moving)
"""
moving_sym = torch.cat((moving, target), 0)
target_sym = torch.cat((target, moving), 0)
moving_mask_sym, target_mask_sym = None, None
if moving_mask is not None and target_mask is not None:
moving_mask_sym = torch.cat((moving_mask, target_mask), 0)
target_mask_sym = torch.cat((target_mask, moving_mask), 0)
rec_IWarped, rec_phiWarped, affine_img = self.mutli_step_forward(moving_sym, target_sym,moving_mask_sym,target_mask_sym)
return rec_IWarped[:self.n_batch], rec_phiWarped[:self.n_batch], affine_img[:self.n_batch]
def get_affine_map(self,moving, target):
"""
compute affine map from the affine registration network
:param moving: moving image [-1, 1]
:param target: target image [-1, 1]
:return: affined image [-1,1]
"""
with torch.no_grad():
affine_img, affine_map, _ = self.affine_net(moving, target)
return affine_map
def get_step_config(self):
"""
check if the multi-step, symmetric forward shoud be activated
:return:
"""
if self.is_train:
self.step = self.multi_step if self.epoch > self.epoch_activate_multi_step else 1
self.using_sym_on = True if self.epoch> self.epoch_activate_sym else False
else:
self.step = self.multi_step
self.using_sym_on = False
def forward(self, moving, target, moving_mask=None, target_mask=None):
"""
forward the mermaid registration model
:param moving: moving image intensity normalized in [-1,1]
:param target: target image intensity normalized in [-1,1]
:return: warped image with intensity[0,1], transformation map [-1,1], affined image [0,1] (if no affine trans used, return moving)
"""
self.get_step_config()
self.n_batch = moving.shape[0]
if self.using_sym_on:
if not self.print_count:
print(" The mermaid network is in multi-step and symmetric mode, with step {}".format(self.step))
return self.mutli_step_sym_forward(moving,target,moving_mask, target_mask)
else:
if not self.print_count:
print(" The mermaid network is in multi-step mode, with step {}".format(self.step))
return self.mutli_step_forward(moving, target,moving_mask, target_mask)
| 34,904 | 47.479167 | 175 | py |
easyreg | easyreg-master/easyreg/reg_net.py | from .base_mermaid import MermaidBase
from .affine_net import *
from .net_utils import print_network
from .losses import Loss
import torch.optim.lr_scheduler as lr_scheduler
from .utils import *
from .mermaid_net import MermaidNet
from .voxel_morph import VoxelMorphCVPR2018, VoxelMorphMICCAI2019
# from .multiscale_net_new2_opt import Multiscale_FlowNet
#from .lin_unpublic_net import model
from .brainstorm import TransformCVPR2019, AppearanceCVPR2019
# maps the configured 'method_name' string to the network class implementing it
model_pool = {
    'affine_sym': AffineNetSym,
    'mermaid': MermaidNet,
    'vm_cvpr': VoxelMorphCVPR2018,
    'vm_miccai': VoxelMorphMICCAI2019,
    # 'multiscale_net':Multiscale_FlowNet,
    #"unpublic_lin": model,
    "bs_trans": TransformCVPR2019,
    'bs_ap': AppearanceCVPR2019
}
class RegNet(MermaidBase):
    """registration network class"""
    def name(self):
        """Return the display name of this model."""
        return 'reg-net'
    def initialize(self, opt):
        """
        initialize variable settings of RegNet
        :param opt: ParameterDict, task settings
        :return:
        """
        MermaidBase.initialize(self, opt)
        input_img_sz = opt['dataset']['img_after_resize']
        self.input_img_sz = input_img_sz
        """ the input image sz of the network"""
        spacing_to_refer = opt['dataset'][('spacing_to_refer',[1, 1, 1],'the physical spacing in numpy coordinate')]
        self.spacing = normalize_spacing(spacing_to_refer, self.input_img_sz) if self.use_physical_coord else 1. / (
                np.array(input_img_sz) - 1)
        """ image spacing"""
        method_name = opt['tsk_set']['method_name']
        self.method_name = method_name
        """the name of the method"""
        self.affine_on = True if 'affine' in method_name else False
        """ perform affine registrtion, if affine is in the network name"""
        self.nonp_on = not self.affine_on
        """ perform affine and nonparametric registration, if mermaid is in the network name"""
        self.network = model_pool[method_name](input_img_sz, opt)
        """create network model"""
        # self.network.apply(weights_init)
        self.criticUpdates = opt['tsk_set']['criticUpdates']
        """update the gradient every # iter"""
        loss_fn = Loss(opt)
        self.network.set_loss_fn(loss_fn)
        self.opt_optim = opt['tsk_set']['optim']
        """settings for the optimizer"""
        self.init_optimize_instance(warmming_up=True)
        """initialize the optimizer and scheduler"""
        self.step_count = 0.
        """ count of the step"""
        self.use_01 = False
        """ the map is normalized to [-1,1] in registration net, todo normalized into [0,1], to be consisitent with mermaid """
        print('---------- Networks initialized -------------')
        print_network(self.network)
        print('-----------------------------------------------')
    def init_optimize_instance(self, warmming_up=False):
        """ get optimizer and scheduler instance"""
        self.optimizer, self.lr_scheduler, self.exp_lr_scheduler = self.init_optim(self.opt_optim, self.network,
                                                                                   warmming_up=warmming_up)
    def update_learning_rate(self, new_lr=-1):
        """
        set new learning rate
        :param new_lr: new learning rate; negative values reset to the configured base rate
        :return:
        """
        if new_lr < 0:
            lr = self.opt_optim['lr']
        else:
            lr = new_lr
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        # keep the scheduler consistent with the manually-set rate
        self.lr_scheduler.base_lrs=[lr]
        self.lr_scheduler.last_epoch = 1
        print(" the learning rate now is set to {}".format(lr))
    def set_input(self, data, is_train=True):
        """
        set the current input batch
        :param data: (img_and_label dict, filename list) pair produced by the dataloader
        :param is_train: whether in training mode (not used in this method)
        :return:
        """
        img_and_label, self.fname_list = data
        self.pair_path = data[0]['pair_path']
        if self.gpu_ids is not None and self.gpu_ids>=0:
            img_and_label['image'] = img_and_label['image'].cuda()
            if 'label' in img_and_label:
                img_and_label['label'] = img_and_label['label'].cuda()
        moving, target, l_moving, l_target = get_reg_pair(img_and_label)
        self.moving = moving
        self.target = target
        self.l_moving = l_moving
        self.l_target = l_target
        self.original_spacing = data[0]['original_spacing']
        self.original_im_sz = data[0]['original_sz']
    def init_optim(self, opt, network, warmming_up=False):
        """
        set optimizers and scheduler
        :param opt: settings on optimizer
        :param network: model with learnable parameters
        :param warmming_up: if set as warmming up
        :return: optimizer, custom scheduler, plateau scheduler
        """
        optimize_name = opt['optim_type']
        lr = opt['lr']
        beta = opt['adam']['beta']
        lr_sched_opt = opt[('lr_scheduler',{},"settings for learning scheduler")]
        self.lr_sched_type = lr_sched_opt['type']
        if optimize_name == 'adam':
            re_optimizer = torch.optim.Adam(network.parameters(), lr=lr, betas=(beta, 0.999))
        else:
            re_optimizer = torch.optim.SGD(network.parameters(), lr=lr)
        re_optimizer.zero_grad()
        re_lr_scheduler = None
        re_exp_lr_scheduler = None
        if self.lr_sched_type == 'custom':
            step_size = lr_sched_opt['custom'][('step_size',50,"update the learning rate every # epoch")]
            gamma = lr_sched_opt['custom'][('gamma',0.5,"the factor for updateing the learning rate")]
            re_lr_scheduler = torch.optim.lr_scheduler.StepLR(re_optimizer, step_size=step_size, gamma=gamma)
        elif self.lr_sched_type == 'plateau':
            patience = lr_sched_opt['plateau']['patience']
            factor = lr_sched_opt['plateau']['factor']
            threshold = lr_sched_opt['plateau']['threshold']
            min_lr = lr_sched_opt['plateau']['min_lr']
            re_exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(re_optimizer, mode='min', patience=patience,
                                                                 factor=factor, verbose=True,
                                                                 threshold=threshold, min_lr=min_lr)
        if not warmming_up:
            print(" no warming up the learning rate is {}".format(lr))
        else:
            # start at a tenth of the configured rate; ramped up later via update_learning_rate
            lr = opt['lr']/10
            for param_group in re_optimizer.param_groups:
                param_group['lr'] = lr
            re_lr_scheduler.base_lrs = [lr]
            print(" warming up on the learning rate is {}".format(lr))
        return re_optimizer, re_lr_scheduler, re_exp_lr_scheduler
    def cal_loss(self, output=None):
        """return the loss computed by the underlying network (output is unused)"""
        loss = self.network.get_loss()
        return loss
    def backward_net(self, loss):
        """backpropagate the given loss"""
        loss.backward()
    def get_debug_info(self):
        """ get filename of the failed cases"""
        info = {'file_name': self.fname_list}
        return info
    def forward(self, input=None):
        """
        :param input(not used )
        :return: warped image intensity with [-1,1], transformation map defined in [-1,1], affine image if nonparameteric reg else affine parameter
        """
        if hasattr(self.network, 'set_cur_epoch'):
            self.network.set_cur_epoch(self.cur_epoch)
        output, phi, afimg_or_afparam = self.network.forward(self.moving, self.target, self.l_moving, self.l_target)
        loss = self.cal_loss()
        if not self.is_train and (self.affine_on or self.method_name=="mermaid"):
            # at evaluation time, also persist the affine parameters to disk
            save_affine_param_with_easyreg_custom(self.network.affine_param,self.record_path,self.fname_list)
        return output, phi, afimg_or_afparam, loss
    def update_scheduler(self,epoch):
        """step the custom lr scheduler (if any) and report the current learning rate"""
        if self.lr_scheduler is not None and epoch>0:
            self.lr_scheduler.step(epoch)
        for param_group in self.optimizer.param_groups:
            print("the current epoch is {} with learining rate set at {}".format(epoch,param_group['lr']))
    def optimize_parameters(self, input=None):
        """
        forward and backward the model, optimize parameters and manage the learning rate
        :param input: input(not used
        :return:
        """
        if self.is_train:
            self.iter_count += 1
        self.output, self.phi, self.afimg_or_afparam, loss = self.forward()
        self.backward_net(loss / self.criticUpdates)
        self.loss = loss.item()
        # gradients are accumulated over criticUpdates iterations before stepping
        if self.iter_count % self.criticUpdates == 0:
            self.optimizer.step()
            self.optimizer.zero_grad()
        update_lr, lr = self.network.check_if_update_lr()
        if update_lr:
            self.update_learning_rate(lr)
    def do_some_clean(self):
        """release references to per-iteration tensors"""
        self.loss = None
        self.gt = None
        self.input = None
        self.output = None
        self.phi = None
        self.afimg_or_afparam = None
    def get_current_errors(self):
        """return the latest scalar loss"""
        return self.loss
    def get_jacobi_val(self):
        """
        :return: the sum of absolute value of negative determinant jacobi, the num of negative determinant jacobi voxels
        """
        return self.jacobi_val
    def save_image_into_original_sz_with_given_reference(self):
        """
        save the image into original image sz and physical coordinate, the path of reference image should be given
        :return:
        """
        self._save_image_into_original_sz_with_given_reference(self.pair_path, self.phi, inverse_phis=self.inverse_phi,
                                                               use_01=self.use_01)
    def get_extra_to_plot(self):
        """
        extra image to be visualized
        :return: image (BxCxXxYxZ), name
        """
        return self.network.get_extra_to_plot()
    def set_train(self):
        """switch to training mode (gradients enabled)"""
        self.network.train(True)
        self.is_train = True
        torch.set_grad_enabled(True)
    def set_val(self):
        """switch to validation mode (gradients disabled)"""
        self.network.train(False)
        self.is_train = False
        torch.set_grad_enabled(False)
    def set_debug(self):
        """switch to debug mode (gradients disabled)"""
        self.network.train(False)
        self.is_train = False
        torch.set_grad_enabled(False)
    def set_test(self):
        """switch to test mode (gradients disabled)"""
        self.network.train(False)
        self.is_train = False
        torch.set_grad_enabled(False)
| 10,289 | 37.111111 | 147 | py |
easyreg | easyreg-master/easyreg/utils.py | import torch
import numpy as np
import skimage
import os
import torchvision.utils as utils
import SimpleITK as sitk
from skimage import color
import mermaid.image_sampling as py_is
from mermaid.data_wrapper import AdaptVal,MyTensor
from .net_utils import gen_identity_map
from .net_utils import Bilinear
import mermaid.utils as py_utils
import mermaid.module_parameters as pars
import mermaid.smoother_factory as sf
def get_reg_pair(data,ch=1):
    """
    Split a channel-concatenated registration pair into its two halves.

    :param data: a dict, including {'img':, 'label':}, with the pair stacked on the channel axis
    :param ch: the num of input channels per image
    :return: source image BxCxXxYxZ, target image BxCxXxYxZ, source label (or None), target label (or None)
    """
    src, tgt = data['image'][:, 0:ch], data['image'][:, ch:2 * ch]
    if 'label' not in data:
        return src, tgt, None, None
    return src, tgt, data['label'][:, 0:ch], data['label'][:, ch:2 * ch]
def get_seg_pair(data, is_train=True):
    """
    Extract image and ground-truth label from a data dict.

    At evaluation time the leading batch dimension added by the loader is
    stripped (note: this mutates ``data`` in place).

    :param data: a dict, including {'img':, 'label':}
    :param is_train: whether in training mode
    :return: image BxCxXxYxZ, label BxCxXxYxZ (or None when no label present)
    """
    if not is_train:
        data['image'] = data['image'][0]
        if 'label' in data:
            data['label'] = data['label'][0]
    return data['image'], data.get('label')
def sigmoid_explode(ep, static =5, k=5):
    """
    Factor that increases with epoch: factor = (k + exp((ep - static) / k)) / k.

    :param ep: cur epoch
    :param static: during the first `static` epochs the factor stays at 1
    :param k: the explode factor
    :return: the multiplicative factor as a float
    """
    # removed the no-op `static = static` assignment of the original
    if ep < static:
        return 1.
    return float((k + np.exp((ep - static) / k)) / k)
def sigmoid_decay(ep, static =5, k=5):
    """
    Factor that decreases with epoch: factor = k / (k + exp((ep - static) / k)).

    :param ep: cur epoch
    :param static: during the first `static` epochs the factor stays at 1
    :param k: the decay factor
    :return: the multiplicative factor as a float
    """
    # removed the no-op `static = static` assignment of the original
    if ep < static:
        return 1.
    return float(k / (k + np.exp((ep - static) / k)))
def factor_tuple(input,factor):
    """
    Multiply every element of a tuple by a scalar factor.

    :param input: input tuple
    :param factor: scalar multiplier
    :return: scaled tuple
    """
    scaled = np.array(list(input)) * factor
    return tuple(list(scaled))
def resize_spacing(img_sz,img_sp,factor):
    """
    Compute the new spacing after resampling an image by a factor.

    :param img_sz: img sz
    :param img_sp: img spacing
    :param factor: factor of resampling image
    :return: new spacing as a tuple
    """
    sz = np.array(list(img_sz))
    sp = np.array(list(img_sp))
    resized_sz = sz * factor
    return tuple(list(sp * (sz - 1) / (resized_sz - 1)))
def save_image_with_scale(path, variable):
    """
    the input variable is [-1,1], save into image
    :param path: path to save
    :param variable: variable to save, XxY
    :return:
    """
    arr = variable.cpu().data.numpy()
    # clip to [-1,1], then rescale to [0,255] for 8-bit image output
    arr = np.clip(arr, -1., 1.)
    arr = (arr+1.)/2 * 255.
    arr = arr.astype(np.uint8)
    skimage.io.imsave(path, arr)
def get_transform_with_itk_format(disp_np, spacing,original, direction):
    """
    Wrap a dense displacement field into a SimpleITK DisplacementFieldTransform.

    :param disp_np: channel-first numpy displacement field (channel size 3 for a 3D transform)
    :param spacing: physical spacing of the field
    :param original: physical origin of the field
    :param direction: direction cosine matrix (row major order)
    :return: sitk.DisplacementFieldTransform
    """
    import SimpleITK as sitk
    # Create a composite transform then write and read.
    displacement = sitk.DisplacementFieldTransform(3)
    # itk expects the size in reversed (x,y,z) order, hence the flip of the numpy shape
    field_size = list(np.flipud(disp_np.shape[1:]).astype(np.float64))
    field_origin = list(original)
    field_spacing = list(spacing)
    field_direction = list(direction) # direction cosine matrix (row major order)
    # Concatenate all the information into a single list.
    displacement.SetFixedParameters(field_size + field_origin + field_spacing + field_direction)
    # parameters are the per-voxel displacement vectors, channel moved last and flattened
    displacement.SetParameters(np.transpose(disp_np,[1,2,3,0]).reshape(-1).astype(np.float64))
    return displacement
def make_image_summary(images, truths, raw_output, maxoutput=4, overlap=True):
    """make image summary for tensorboard

    :param images: torch.Variable, NxCxDxHxW, 3D image volume (C:channels)
    :param truths: torch.Variable, NxDxHxW, 3D label mask
    :param raw_output: torch.Variable, NxCxHxWxD: prediction for each class (C:classes)
    :param maxoutput: int, number of samples from a batch
    :param overlap: bool, overlap the image with groundtruth and predictions
    :return: summary_images: list, a maxoutput-long list with element of tensors of Nx
    """
    # take the middle slice along the depth axis for a 2D summary
    slice_ind = images.size()[2] // 2
    images_2D = images.data[:maxoutput, :, slice_ind, :, :]
    truths_2D = truths.data[:maxoutput, slice_ind, :, :]
    # argmax over the class channel gives the hard prediction
    predictions_2D = torch.max(raw_output.data, 1)[1][:maxoutput, slice_ind, :, :]
    grid_images = utils.make_grid(images_2D, pad_value=1)
    grid_truths = utils.make_grid(labels2colors(truths_2D, images=images_2D, overlap=overlap), pad_value=1)
    grid_preds = utils.make_grid(labels2colors(predictions_2D, images=images_2D, overlap=overlap), pad_value=1)
    # stack the three grids vertically: raw image / ground truth / prediction
    return torch.cat([grid_images, grid_truths, grid_preds], 1)
def labels2colors(labels, images=None, overlap=False):
    """Turn label masks into color images

    :param labels: torch.tensor, NxMxN
    :param images: torch.tensor, NxMxN or NxMxNx3
    :param overlap: bool
    :return: colors: torch.tensor, Nx3xMxN
    :raises ValueError: when overlap is requested without background images
    """
    colors = []
    if overlap:
        if images is None:
            raise ValueError("Need background images when overlap is True")
        else:
            for i in range(images.size()[0]):
                image = images.squeeze()[i, :, :]
                label = labels[i, :, :]
                colors.append(color.label2rgb(label.cpu().numpy(), image.cpu().numpy(), bg_label=0, alpha=0.7))
    else:
        for i in range(images.size()[0]):
            label = labels[i, :, :]
            # NOTE(review): `label.numpy()` fails for CUDA tensors, unlike the
            # `.cpu().numpy()` used in the overlap branch — confirm this path
            # is only reached with CPU tensors
            colors.append(color.label2rgb(label.numpy(), bg_label=0))
    return torch.Tensor(np.transpose(np.stack(colors, 0), (0, 3, 1, 2))).cuda()
def t2np(v):
    """
    Takes a torch array and returns it as a numpy array on the cpu

    :param v: torch array (or any object; non-convertible inputs are returned unchanged)
    :return: numpy array, or the input itself when it cannot be converted
    """
    # isinstance (not `type(v) ==`) so tensor subclasses such as nn.Parameter
    # also take the detach path, which is safe for grad-requiring tensors
    if isinstance(v, torch.Tensor):
        return v.detach().cpu().numpy()
    try:
        return v.cpu().numpy()
    except Exception:
        # best effort: object exposes no cpu()/numpy(); hand it back unchanged
        # (narrowed from a bare `except:` which also swallowed KeyboardInterrupt)
        return v
def make_dir(path):
    """
    Create `path` (including parent directories) if it does not exist.

    :param path: directory path
    :return: True if the directory already existed, False otherwise
    """
    existed = os.path.exists(path)
    if not existed:
        os.makedirs(path)
    return existed
def lift_to_dimension(A,dim):
    """
    Creates a view of A of dimension dim (by adding leading singleton
    dimensions if necessary). Assumes a numpy array as input.

    :param A: numpy array
    :param dim: desired dimension of view
    :return: returns view of A of appropriate dimension
    :raises ValueError: if A already has more than `dim` dimensions
    """
    ndim = len(A.shape)
    if ndim > dim:
        raise ValueError('Can only add dimensions, but not remove them')
    if ndim == dim:
        return A
    return A.reshape([1] * (dim - ndim) + list(A.shape))
def update_affine_param( cur_af, last_af): # A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2
    """
    update the current affine parameter A2 based on last affine parameter A1
    A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2, results in the composed affine parameter A3=(A2A1, A2*b1+b2)
    :param cur_af: current affine parameter, Bx12 (viewed as Bx4x3: rows 0-2 form A, row 3 is b)
    :param last_af: last affine parameter, same layout
    :return: composed affine parameter A3, flattened back to Bx12
    """
    cur_af = cur_af.view(cur_af.shape[0], 4, 3)
    last_af = last_af.view(last_af.shape[0],4,3)
    updated_af = torch.zeros_like(cur_af.data).to(cur_af.device)
    dim =3
    # linear part: A2 @ A1
    updated_af[:,:3,:] = torch.matmul(cur_af[:,:3,:],last_af[:,:3,:])
    # translation part: A2 @ b1 + b2
    updated_af[:,3,:] = cur_af[:,3,:] + torch.squeeze(torch.matmul(cur_af[:,:3,:], torch.transpose(last_af[:,3:,:],1,2)),2)
    updated_af = updated_af.contiguous().view(cur_af.shape[0],-1)
    return updated_af
def get_inverse_affine_param(affine_param,dim=3):
    """A2(A1*x+b1) +b2= A2A1*x + A2*b1+b2 = x  A2= A1^-1, b2 = - A2^b1"""
    affine_param = affine_param.view(affine_param.shape[0], dim + 1, dim)
    inv = torch.zeros_like(affine_param.data).to(affine_param.device)
    # invert each batch element independently: A -> A^-1, b -> -A^-1 b
    for b in range(affine_param.shape[0]):
        A_inv = torch.inverse(affine_param[b, :dim, :])
        inv[b, :dim, :] = A_inv
        inv[b, dim, :] = -torch.matmul(A_inv, affine_param[b, dim, :])
    return inv.contiguous().view(affine_param.shape[0], -1)
def gen_affine_map(Ab, img_sz, dim=3):
    """
    generate the affine transformation map with regard to affine parameter
    :param Ab: affine parameter, Bx((dim+1)*dim), viewed as Bx(dim+1)xdim (A rows then b)
    :param img_sz: image sz [X,Y,Z]
    :param dim: spatial dimension
    :return: affine transformation map, batch x dim x spatial size
    """
    Ab = Ab.view(Ab.shape[0], dim+1, dim)
    # identity grid shared by all batch elements
    phi = gen_identity_map(img_sz).to(Ab.device)
    phi_cp = phi.view(dim, -1)
    # apply A to every grid coordinate, then add the translation b
    affine_map = torch.matmul(Ab[:, :dim, :], phi_cp)
    affine_map = Ab[:, dim, :].contiguous().view(-1, dim, 1) + affine_map
    affine_map = affine_map.view([Ab.shape[0]] + list(phi.shape))
    return affine_map
def transfer_mermaid_affine_into_easyreg_affine(affine_param, dim=3):
    """
    convert an affine parameter from mermaid's convention into easyreg's.

    NOTE: the input tensor is mutated in place through views, and the statement
    order matters: ``b`` is a view of the translation row, read when the
    translation row is rewritten but after the linear part has been transposed.

    :param affine_param: Bx((dim+1)*dim) affine parameter in mermaid convention
    :param dim: spatial dimension (default 3)
    :return: the (mutated) parameter reshaped to Bx((dim+1)*dim) in easyreg convention
    """
    affine_param = affine_param.view(affine_param.shape[0], dim+1, dim)
    I = torch.ones(dim).to(affine_param.device)
    b = affine_param[:, dim,:]
    affine_param[:,:dim,:]= affine_param[:,:dim,:].transpose(1, 2)
    affine_param[:, dim,:] =2*b +torch.matmul(affine_param[:,:dim,:],I)-1  # easyreg assumes the map is defined in [-1,1] while mermaid assumes [0,1]
    affine_param = affine_param.contiguous()
    affine_param = affine_param.view(affine_param.shape[0],-1)
    return affine_param
def transfer_easyreg_affine_into_mermaid_affine(affine_param, dim=3):
    """
    convert an affine parameter from easyreg's convention into mermaid's
    (exact inverse of transfer_mermaid_affine_into_easyreg_affine).

    NOTE: the input tensor is mutated in place through views; the translation
    row must be rewritten *before* the linear part is transposed (the original
    comment below stresses this ordering).

    :param affine_param: Bx((dim+1)*dim) affine parameter in easyreg convention
    :param dim: spatial dimension (default 3)
    :return: the (mutated) parameter reshaped to Bx((dim+1)*dim) in mermaid convention
    """
    affine_param = affine_param.view(affine_param.shape[0], dim+1, dim)
    I = torch.ones(dim).to(affine_param.device)
    b = affine_param[:, dim,:]
    affine_param[:, dim,:] = (b-torch.matmul(affine_param[:,:dim,:],I)+1)/2  # the order here is important
    affine_param[:,:dim,:]= affine_param[:,:dim,:].transpose(1, 2)
    affine_param = affine_param.contiguous()
    affine_param = affine_param.view(affine_param.shape[0],-1)
    return affine_param
def save_affine_param_with_easyreg_custom(affine_param, output_path, fname_list, affine_compute_from_mermaid=False):
    """
    save per-sample affine parameters as '<fname>_affine_param.npy' files.

    :param affine_param: Bxk tensor of affine parameters, or a list whose first
        element is that tensor; None is silently ignored
    :param output_path: folder the .npy files are written into
    :param fname_list: one file-name prefix per batch element
    :param affine_compute_from_mermaid: if True, convert from mermaid convention
        into easyreg convention before saving
    """
    if affine_param is None:
        return
    # unwrap a list input *before* calling tensor methods; the previous order
    # called .detach() first, which crashed whenever a list was passed in
    if isinstance(affine_param, list):
        affine_param = affine_param[0]
    affine_param = affine_param.detach().clone()
    if affine_compute_from_mermaid:
        affine_param = transfer_mermaid_affine_into_easyreg_affine(affine_param)
    affine_param = affine_param.detach().cpu().numpy()
    for i, fname in enumerate(fname_list):
        np.save(os.path.join(output_path, fname) + '_affine_param.npy', affine_param[i])
def get_warped_img_map_param( Ab, img_sz, moving, dim=3, zero_boundary=True):
    """
    warp a moving image with the affine map derived from an affine parameter
    :param Ab: affine parameter
    :param img_sz: image sz [X,Y,Z]
    :param moving: moving image BxCxXxYxZ
    :param zero_boundary: zero_boundary condition for the interpolation
    :return: affine image, affine transformation map, affine parameter
    """
    affine_map = gen_affine_map(Ab, img_sz, dim)
    interpolator = Bilinear(zero_boundary)
    warped = interpolator(moving, affine_map)
    return warped, affine_map, Ab
def show_current_pair_by_3d_slice(iS, iT):
    """
    visualize the pair image by slice (source on the top row, target on the
    bottom row, one sliced view per axis); corresponding columns are synchronized
    :param iS: source image
    :param iT: target image
    :return:
    """
    import matplotlib.pyplot as plt
    import easyreg.viewers as viewers
    fig, ax = plt.subplots(2, 3)
    plt.setp(plt.gcf(), 'facecolor', 'white')
    plt.style.use('bmh')
    feh = viewers.FigureEventHandler(fig)
    # one sliced viewer per (image row, slicing axis column); the former code
    # spelled out the 6 viewers and 6 event registrations by hand
    for row, (img, label) in enumerate([(iS, 'source'), (iT, 'target')]):
        for col, axis_name in enumerate(('X', 'Y', 'Z')):
            viewer = viewers.ImageViewer3D_Sliced(ax[row][col], img, col,
                                                  '{} {}'.format(label, axis_name), True)
            feh.add_axes_event('button_press_event', ax[row][col], viewer.on_mouse_press,
                               viewer.get_synchronize, viewer.set_synchronize)
    # keep each source/target column pair in sync
    for col in range(3):
        feh.synchronize([ax[0][col], ax[1][col]])
def get_res_size_from_size(sz, factor):
    """
    Returns the corresponding low-res size from a (high-res) sz
    :param sz: size (high-res), first two entries are batch and channel
    :param factor: low-res factor (needs to be <1); scalar or per-dimension list
    :return: low res size
    """
    if factor is None:
        print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
        return sz
    low_res_size = np.array(sz)
    # a list factor scales each spatial dimension separately
    scale = np.array(factor) if isinstance(factor, list) else factor
    low_res_size[2::] = np.ceil(np.array(sz[2:]) * scale).astype('int16')
    if low_res_size[-1] % 2 != 0:
        low_res_size[-1] -= 1
        print('\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n')
    return low_res_size
def get_res_spacing_from_spacing(spacing, sz, lowResSize):
    """
    Computes spacing for the low-res parameterization from image spacing
    :param spacing: image spacing
    :param sz: size of image
    :param lowResSize: size of low re parameterization
    :return: returns spacing of low res parameterization
    """
    # todo: check that this is the correct way of doing it
    # prepend dummy batch/channel entries when the sizes are purely spatial
    full_sz = [1, 1] + sz if len(sz) == len(spacing) else sz
    full_low_sz = [1, 1] + lowResSize if len(lowResSize) == len(spacing) else lowResSize
    return spacing * (np.array(full_sz[2::]) - 1) / (np.array(full_low_sz[2::]) - 1)
def _compute_low_res_image(I,spacing,low_res_size,zero_boundary=False):
    """downsample image I to low_res_size via mermaid's ResampleImage.

    :param I: input image (BxCx...)
    :param spacing: image spacing
    :param low_res_size: full target size; only the spatial part [2:] is passed on
    :param zero_boundary: zero boundary condition for the resampler
    :return: the downsampled image
    """
    sampler = py_is.ResampleImage()
    # the trailing 1 selects the resampler's interpolation order
    low_res_image, _ = sampler.downsample_image_to_size(I, spacing, low_res_size[2::],1,zero_boundary=zero_boundary)
    return low_res_image
def resample_image(I,spacing,desiredSize, spline_order=1,zero_boundary=False,identity_map=None):
    """
    Resample an image to a given desired size
    :param I: Input image (expected to be of BxCxXxYxZ format)
    :param spacing: array describing the spatial spacing
    :param desiredSize: array for the desired spatial size; if it includes the
        B and C entries (i.e. is 2 longer than the spatial rank) they are stripped
    :param spline_order: interpolation spline order for the warp
    :param zero_boundary: zero boundary condition for the warp
    :param identity_map: optional precomputed identity map on the target grid;
        built from desiredSize/newspacing when not given
    :return: returns a tuple: the downsampled image, the new spacing after downsampling
    """
    if len(I.shape) != len(desiredSize)+2:
        desiredSize = desiredSize[2:]
    sz = np.array(list(I.size()))
    # batch size and number of channels are carried over unchanged
    nrOfI = sz[0]
    nrOfC = sz[1]
    desiredSizeNC = np.array([nrOfI,nrOfC]+list(desiredSize))
    # rescale spacing so the physical extent (spacing * (#voxels-1)) is preserved
    newspacing = spacing*((sz[2::].astype('float')-1.)/(desiredSizeNC[2::].astype('float')-1.))
    if identity_map is not None:
        idDes= identity_map
    else:
        idDes = torch.from_numpy(py_utils.identity_map_multiN(desiredSizeNC,newspacing)).to(I.device)
    # now use this map for resampling
    ID = py_utils.compute_warped_image_multiNC(I, idDes, newspacing, spline_order,zero_boundary)
    return ID, newspacing
def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
    """
    resample an image to the desired size

    :param I: B C X Y Z
    :param spacing: spx spy spz; inferred from the image size when None
    :param desiredSize: B C X Y Z
    :param spline_order: interpolation spline order
    :param zero_boundary: zero boundary condition
    :param identity_map: optional precomputed identity map on the target grid
    :return: the resampled image
    """
    if spacing is None:
        spatial_sz = I.shape[2:]
        spacing = 1. / (np.array(spatial_sz) - 1)
    # todo will remove, currently fix for symmetric training
    if identity_map is not None and I.shape[0] != identity_map.shape[0]:
        n_batch = I.shape[0]
        desiredSize = desiredSize.copy()
        desiredSize[0] = n_batch
        identity_map = identity_map[:n_batch]
    resampled, _ = resample_image(I, spacing, desiredSize, spline_order=spline_order,
                                  zero_boundary=zero_boundary, identity_map=identity_map)
    return resampled
def load_inital_weight_from_pt(path):
    """load an initial weight tensor previously saved with torch.save"""
    return torch.load(path)
def get_init_weight_from_label_map(lsource, spacing,default_multi_gaussian_weights,multi_gaussian_weights,weight_type='w_K_w'):
    """
    for rdmm model with spatial-variant regularizer, we initialize multi gaussian weight with regard to the label map
    assume img sz BxCxXxYxZ and N gaussian smoothers are taken, the return weight map should be BxNxXxYxZ
    :param lsource: label of the source image
    :param spacing: image spacing
    :param default_multi_gaussian_weights: multi-gaussian weight set for the background
    :param multi_gaussian_weights: multi-gaussian weight set for the foreground( labeled region)
    :param weight_type: either w_K_w or sqrt_w_K_sqrt_w
    :return: weight map BxNxXxYxZ
    """
    if type(lsource)==torch.Tensor:
        lsource = lsource.detach().cpu().numpy()
    sz = lsource.shape[2:]
    nr_of_mg_weights = len(default_multi_gaussian_weights)
    sh_weights = [lsource.shape[0]] + [nr_of_mg_weights] + list(sz)
    weights = np.zeros(sh_weights, dtype='float32')
    # fill every voxel with the background weights first
    for g in range(nr_of_mg_weights):
        weights[:, g, ...] = default_multi_gaussian_weights[g]
    # then overwrite the voxels with a positive label (foreground)
    indexes = np.where(lsource>0)
    for g in range(nr_of_mg_weights):
        # NOTE(review): sqrt is taken for 'w_K_w' rather than 'sqrt_w_K_sqrt_w';
        # this looks inverted relative to the names -- confirm the intended mapping
        weights[indexes[0], g, indexes[2], indexes[3],indexes[4]] = np.sqrt(multi_gaussian_weights[g]) if weight_type=='w_K_w' else multi_gaussian_weights[g]
    weights = MyTensor(weights)
    # lightly smooth the map so the foreground/background boundary is not hard
    local_smoother = get_single_gaussian_smoother(0.02,sz,spacing)
    sm_weight = local_smoother.smooth(weights)
    return sm_weight
def get_single_gaussian_smoother(gaussian_std,sz,spacing):
    """build a mermaid gaussian smoother with the given std for images of size sz/spacing.

    :param gaussian_std: standard deviation of the gaussian kernel
    :param sz: spatial size of the images to be smoothed
    :param spacing: image spacing
    :return: the created smoother object
    """
    s_m_params = pars.ParameterDict()
    s_m_params['smoother']['type'] = 'gaussian'
    s_m_params['smoother']['gaussian_std'] = gaussian_std
    s_m = sf.SmootherFactory(sz, spacing).create_smoother(s_m_params)
    return s_m
def get_gaussion_weight_from_tsk_opt(opt):
    # NOTE(review): this reads opt[''] (an empty settings key) -- looks like an
    # unfinished stub; confirm the intended key before relying on this function
    return opt['']
def normalize_spacing(spacing, sz, silent_mode=False):
    """
    Normalizes spacing so that the largest physical extent becomes 1.
    :param spacing: Vector with spacing info, in XxYxZ format (numpy array)
    :param sz: size vector in XxYxZ format
    :param silent_mode: if True, suppresses the informational prints
    :return: vector with normalized spacings in XxYxZ format
    """
    dim = len(spacing)
    # physical extent per dimension: spacing * (number of intervals);
    # vectorized form of the former element-by-element loop
    extent = spacing * (np.array(sz[:dim]) - 1)
    current_largest_extent = np.max(extent)
    scalingFactor = 1. / current_largest_extent
    normalized_spacing = spacing * scalingFactor
    normalized_extent = extent * scalingFactor
    if not silent_mode:
        print('Normalize spacing: ' + str(spacing) + ' -> ' + str(normalized_spacing))
        print('Normalize spacing, extent: ' + str(extent) + ' -> ' + str(normalized_extent))
    return normalized_spacing
def dfield2bspline(dfield, sp_order=3, n_nodes=50, verbose=False):
    """
    fit a sitk.BSplineTransform approximating a dense displacement field.

    :param dfield: sitk displacement-field image (vector pixels, one component per dimension)
    :param sp_order: spline order of the fitted BSpline
    :param n_nodes: number of BSpline control nodes per dimension
    :param verbose: if True, print progress information
    :return: sitk.BSplineTransform whose coefficient images approximate the field
    """
    # BSpline configuration
    dim = dfield.GetDimension()
    n_nodes = np.full(dim, n_nodes)
    mesh_sz = n_nodes - sp_order
    physical_dim = np.array(dfield.GetSpacing()) * (np.array(dfield.GetSize()) - 1)
    # This transform is used to compute the origin and spacing properly
    bstx = sitk.BSplineTransform(sp_order)
    bstx.SetTransformDomainOrigin(dfield.GetOrigin())
    bstx.SetTransformDomainPhysicalDimensions(physical_dim.tolist())
    bstx.SetTransformDomainMeshSize(mesh_sz.tolist())
    bstx.SetTransformDomainDirection(dfield.GetDirection())
    if verbose:
        print('Adjusting BSpline to the Displacement field...')
    Idf = sitk.GetArrayViewFromImage(dfield)
    img_params = []
    # fit one coefficient image per displacement component
    for i in range(dim):
        # Create the image for dim i
        dfi = sitk.GetImageFromArray(Idf[..., i])
        dfi.SetDirection(dfield.GetDirection())
        dfi.SetOrigin(dfield.GetOrigin())
        dfi.SetSpacing(dfield.GetSpacing())
        # Downsampling the image field to the desired BSpline grid
        downsampler = sitk.ResampleImageFilter()
        downsampler.SetInterpolator(sitk.sitkBSpline)
        downsampler.SetDefaultPixelValue(0)
        # By default the Identity is used as transform
        downsampler.SetSize(bstx.GetCoefficientImages()[i].GetSize())
        downsampler.SetOutputSpacing(bstx.GetCoefficientImages()[i].GetSpacing())
        downsampler.SetOutputOrigin(bstx.GetCoefficientImages()[i].GetOrigin())
        downsampler.SetOutputDirection(dfield.GetDirection())
        out = downsampler.Execute(dfi)
        # decompose the downsampled component into BSpline coefficients
        decomp = sitk.BSplineDecompositionImageFilter()
        decomp.SetSplineOrder(sp_order)
        img_params.append(decomp.Execute(out))
    bstx = sitk.BSplineTransform(img_params, sp_order)
    return bstx
# dtransform = sitk.ReadTransform(df_name)
# # Retrive the DField from the Transform
# dfield = sitk.DisplacementFieldTransform(dtransform).GetDisplacementField()
# # Fitting a BSpline from the Deformation Field
# bstx = dfield2bspline(dfield, verbose=True)
#
# # Save the BSpline Transform
# sitk.WriteTransform(bstx, df_name.replace('_disp.h5', '_disp_bs.tfm')) | 22,136 | 36.142617 | 157 | py |
easyreg | easyreg-master/easyreg/train_expr.py | from time import time
from .net_utils import *
def train_model(opt, model, dataloaders, writer):
    """
    generic train / val / debug loop shared by the learning-based models.

    :param opt: ParameterDict holding the task settings (under 'tsk_set')
    :param model: model wrapper exposing set_input/optimize_parameters/... hooks
    :param dataloaders: dict with 'train'/'val'/'debug' loaders and a 'data_size' entry
    :param writer: tensorboard summary writer
    :return: the model at the last epoch (not the best epoch)
    """
    since = time()
    # ---- read all loop-control settings from the task options ----
    print_step = opt['tsk_set'][('print_step', [10,4,4], 'num of steps to print')]
    num_epochs = opt['tsk_set'][('epoch', 100, 'num of training epoch')]
    continue_train = opt['tsk_set'][('continue_train', False, 'continue to train')]
    model_path = opt['tsk_set']['path']['model_load_path']
    reset_train_epoch = opt['tsk_set'][('reset_train_epoch',False,'allow the training epoch to be reset of not')]
    load_model_but_train_from_epoch = opt['tsk_set'][('load_model_but_train_from_epoch',0,'if reset_train_epoch is true, the epoch will be set as the given number')]
    check_point_path = opt['tsk_set']['path']['check_point_path']
    max_batch_num_per_epoch_list = opt['tsk_set'][('max_batch_num_per_epoch',(-1,-1,-1),"max batch number per epoch for train|val|debug")]
    gpu_id = opt['tsk_set']['gpu_ids']
    best_score = 0
    is_best = False
    start_epoch = 0
    best_epoch = -1
    phases = ['train','val','debug']
    # per-phase running state (step counters and tensorboard accumulators)
    global_step = {x: 0 for x in phases}
    period_loss = {x: 0. for x in phases}
    period_detailed_scores = {x: 0. for x in phases}
    max_batch_num_per_epoch = {x: max_batch_num_per_epoch_list[i] for i, x in enumerate(phases)}
    period = {x: print_step[i] for i, x in enumerate(phases)}
    check_best_model_period = opt['tsk_set'][('check_best_model_period',5,'save best performed model every # epoch')]
    tensorboard_print_period = {phase: min(max_batch_num_per_epoch[phase], period[phase]) for phase in phases}
    save_fig_epoch = opt['tsk_set'][('save_val_fig_epoch',2,'saving every num epoch')]
    save_running_resolution_3d_img = opt['tsk_set'][('save_running_resolution_3d_img', False, 'saving fig')]
    val_period = opt['tsk_set'][('val_period',10,'do validation every num epoch')]
    save_fig_on = opt['tsk_set'][('save_fig_on',True,'saving fig')]
    warmming_up_epoch = opt['tsk_set'][('warmming_up_epoch',2,'warming up the model in the first # epoch')]
    continue_train_lr = opt['tsk_set'][('continue_train_lr', -1, 'learning rate for continuing to train')]
    opt['tsk_set']['optim']['lr'] = opt['tsk_set']['optim']['lr'] if not continue_train else continue_train_lr
    # ---- optionally resume from a checkpoint ----
    if continue_train:
        start_epoch, best_prec1, global_step = resume_train(model_path, model.network, model.optimizer)
        if continue_train_lr > 0:
            model.update_learning_rate(continue_train_lr)
            print("the learning rate has been changed into {} when resuming the training".format(continue_train_lr))
        model.iter_count = global_step['train']
        if reset_train_epoch:
            # keep the loaded weights but restart the epoch/step bookkeeping
            start_epoch = load_model_but_train_from_epoch
            global_step = {x: load_model_but_train_from_epoch*max_batch_num_per_epoch[x] for x in phases}
            print("the model has been initialized from extern, but will train from the epoch {}".format(start_epoch))
            model.iter_count = 0
    # NOTE: multi-gpu (DataParallel) support is currently disabled in this code path
    if gpu_id >= 0:
        model.network = model.network.cuda()
    for epoch in range(start_epoch, num_epochs+1):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        model.set_cur_epoch(epoch)
        if epoch == warmming_up_epoch and not reset_train_epoch:
            model.update_learning_rate()
        for phase in phases:
            # if is not training phase, and not the #*val_period , then break
            if phase != 'train' and epoch % val_period != 0:
                break
            # if # = 0 or None then skip the val or debug phase
            if not max_batch_num_per_epoch[phase]:
                continue
            if phase == 'train':
                model.update_scheduler(epoch)
                model.set_train()
            elif phase == 'val':
                model.set_val()
            else:
                model.set_debug()
            running_val_score = 0.0
            running_debug_score = 0.0
            for data in dataloaders[phase]:
                global_step[phase] += 1
                end_of_epoch = global_step[phase] % min(max_batch_num_per_epoch[phase], len(dataloaders[phase])) == 0
                is_train = True if phase == 'train' else False
                model.set_input(data, is_train)
                loss = 0.
                detailed_scores = 0.
                if phase == 'train':
                    model.optimize_parameters()
                    loss = model.get_current_errors()
                elif phase == 'val':
                    model.cal_val_errors()
                    if epoch % save_fig_epoch == 0 and save_fig_on:
                        model.save_fig(phase)
                        if save_running_resolution_3d_img:
                            model.save_fig_3D(phase='val')
                    score, detailed_scores = model.get_val_res()
                    print('val score of batch {} is {}:'.format(model.get_image_names(), score))
                    print('val detailed score of batch {} is {}:'.format(model.get_image_names(), detailed_scores))
                    model.update_loss(epoch, end_of_epoch)
                    running_val_score += score
                    loss = score
                elif phase == 'debug':
                    print('debugging loss:')
                    model.cal_val_errors()
                    if epoch > 0 and epoch % save_fig_epoch == 0 and save_fig_on:
                        model.save_fig(phase)
                        if save_running_resolution_3d_img:
                            model.save_fig_3D(phase='debug')
                    score, detailed_scores = model.get_val_res()
                    print('debug score of batch {} is {}:'.format(model.get_image_names(), score))
                    print('debug detailed score of batch {} is {}:'.format(model.get_image_names(), detailed_scores))
                    running_debug_score += score
                    loss = score
                model.do_some_clean()
                # save for tensorboard, both train and val will be saved
                period_loss[phase] += loss
                if not is_train:
                    period_detailed_scores[phase] += detailed_scores
                if global_step[phase] > 0 and global_step[phase] % tensorboard_print_period[phase] == 0:
                    if not is_train:
                        period_avg_detailed_scores = np.squeeze(period_detailed_scores[phase]) / tensorboard_print_period[phase]
                        for i in range(len(period_avg_detailed_scores)):
                            writer.add_scalar('loss/' + phase + '_l_{}'.format(i), period_avg_detailed_scores[i], global_step['train'])
                        period_detailed_scores[phase] = 0.
                    period_avg_loss = period_loss[phase] / tensorboard_print_period[phase]
                    writer.add_scalar('loss/' + phase, period_avg_loss, global_step['train'])
                    print("global_step:{}, {} lossing is{}".format(global_step['train'], phase, period_avg_loss))
                    period_loss[phase] = 0.
                if end_of_epoch:
                    break
            if phase == 'val':
                epoch_val_score = running_val_score / min(max_batch_num_per_epoch['val'], dataloaders['data_size']['val'])
                print('{} epoch_val_score: {:.4f}'.format(epoch, epoch_val_score))
                if model.exp_lr_scheduler is not None:
                    model.exp_lr_scheduler.step(epoch_val_score)
                    print("debugging, the exp_lr_schedule works and update the step")
                if epoch == 0:
                    best_score = epoch_val_score
                if epoch_val_score > best_score or epoch_val_score == -1:
                    best_score = epoch_val_score
                    best_epoch = epoch
                    save_model(model, check_point_path, epoch, global_step, 'epoch_' + str(epoch), True, best_score)
            if phase == 'train':
                # currently we just save model by period, so need to check the best model manually
                if epoch % check_best_model_period == 0:  # is_best and epoch % check_best_model_period==0:
                    save_model(model, check_point_path, epoch, global_step, 'epoch_' + str(epoch), False, best_score)
            if phase == 'debug':
                epoch_debug_score = running_debug_score / min(max_batch_num_per_epoch['debug'], dataloaders['data_size']['debug'])
                print('{} epoch_debug_score: {:.4f}'.format(epoch, epoch_debug_score))
    time_elapsed = time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val score : {:4f} is at epoch {}'.format(best_score, best_epoch))
    writer.close()
    # return the model at the last epoch, not the best epoch
    return model
def save_model(model, check_point_path, epoch, global_step, name, is_best=False, best_score=-1):
    """collect the optimizer state(s) and checkpoint the network via save_checkpoint"""
    if isinstance(model.optimizer, tuple):
        # multi-optimizer case: store one state dict per optimizer
        optimizer_state = tuple(term.state_dict() for term in model.optimizer)
    else:
        optimizer_state = model.optimizer.state_dict()
    state = {'epoch': epoch,
             'state_dict': model.network.state_dict(),
             'optimizer': optimizer_state,
             'best_score': best_score,
             'global_step': global_step}
    save_checkpoint(state, is_best, check_point_path, name, '')
| 10,456 | 48.795238 | 164 | py |
easyreg | easyreg-master/easyreg/base_seg_model.py |
from .utils import *
import SimpleITK as sitk
from tools.visual_tools import save_3D_img_from_numpy
class SegModelBase():
    """
    the base class for image segmentation models: holds the shared options,
    train/val/test mode switches, evaluation accessors and 3d result saving
    """
    def name(self):
        return 'SegModelBase'

    def initialize(self, opt):
        """
        read the shared task settings and reset the bookkeeping state
        :param opt: ParameterDict, task settings
        :return: None
        """
        self.opt = opt
        self.gpu_ids = opt['tsk_set'][('gpu_ids', 0, 'the gpu id used for network methods')]
        self.isTrain = opt['tsk_set'][('train', True, 'True, take the train mode')]
        self.save_dir = opt['tsk_set']['path']['check_point_path']
        self.record_path = opt['tsk_set']['path']['record_path']
        self.spacing = None
        self.continue_train = opt['tsk_set'][('continue_train', False, "for network training method, continue training the model loaded from model_path")]
        self.criticUpdates = opt['tsk_set'][('criticUpdates', 1, "for network training method, the num determines gradient update every # iter")]
        self.n_in_channel = opt['tsk_set'][('n_in_channel', 1, "for network training method, the color channel typically set to 1")]
        self.input_img_sz = self.opt['dataset'][('img_after_resize', None, "image size after resampling")]
        self.original_im_sz = None
        self.original_spacing = None
        self.optimizer = None
        self.lr_scheduler = None
        self.exp_lr_scheduler = None
        self.iter_count = 0
        self.dim = 3  # len(self.input_img_sz)
        self.network = None
        self.val_res_dic = {}
        self.fname_list = None
        self.input = None
        self.output = None
        self.gt = None
        self.multi_gpu_on = False  # todo for now the distributed computing is not supported

    def set_input(self, input):
        """
        set the input of the method
        :param input:
        :return:
        """
        self.input = input

    def forward(self, input=None):
        """run the forward pass (implemented by subclasses)"""
        pass

    def test(self):
        """run the test pass (implemented by subclasses)"""
        pass

    def set_train(self):
        """
        set the model in train mode (only for learning methods)
        :return:
        """
        self.network.train(True)
        self.is_train = True

    def set_val(self):
        """
        set the model in validation mode (only for learning methods)
        :return:
        """
        self.network.train(False)
        self.is_train = False

    def set_debug(self):
        """
        set the model in debug (subset of training set) mode (only for learning methods)
        :return:
        """
        self.network.train(False)
        self.is_train = False

    def set_test(self):
        """
        set the model in test mode ( only for learning methods)
        :return:
        """
        self.network.train(False)
        self.is_train = False

    def set_multi_gpu_on(self):
        """
        multi gpu support (disabled)
        :return:
        """
        self.multi_gpu_on = True

    def optimize_parameters(self):
        """
        optimize model parameters
        :return:
        """
        pass

    def get_debug_info(self):
        """ get debug info"""
        return None

    # get image paths
    def get_image_names(self):
        """get image name list"""
        return self.fname_list

    def set_cur_epoch(self, epoch):
        """
        set epoch
        :param epoch:
        :return:
        """
        self.cur_epoch = epoch

    def cal_loss(self, output=None):
        """compute the training loss (implemented by subclasses)"""
        pass

    def get_current_errors(self):
        """
        get the current loss
        :return:
        """
        # NOTE(review): .data[0] on a 0-dim tensor fails on recent pytorch
        # versions (.item() is the modern accessor) -- confirm the torch version
        return self.loss.data[0]

    def cal_val_errors(self):
        """ compute the loss on validatoin set"""
        self.cal_test_errors()

    def cal_test_errors(self):
        """ compute the loss on test set"""
        self.get_evaluation()

    def get_evaluation(self):
        """evaluate the performance of the current model"""
        pass

    def update_loss(self, epoch, end_of_epoch):
        """hook called at the end of each val batch (implemented by subclasses)"""
        pass

    def get_val_res(self, detail=False):
        """
        if the label map is given, evaluate the overlap sturcture
        :param detail:
        if detail, then output average dice score of each non-bg structure; and different scores of each structure
        if not, then output average dice score of each non-bg structure; and dice score of each structure
        :return:
        """
        if len(self.val_res_dic):
            if not detail:
                return np.mean(self.val_res_dic['batch_avg_res']['dice'][0, 1:]), self.val_res_dic['batch_avg_res'][
                    'dice']
            else:
                return np.mean(self.val_res_dic['batch_avg_res']['dice'][0, 1:]), self.val_res_dic['multi_metric_res']
        else:
            return -1, np.array([-1, -1])

    def get_test_res(self, detail=False):
        """
        if the label map is given, evaluate the overlap strucrue
        :param detail:
        if detail, then output average dice score of each non-bg structure; and different scores of each structure
        if not, then output average dice score of each non-bg structure; and dice score of each structure
        :return:
        """
        return self.get_val_res(detail=detail)

    def save_fig(self, phase):
        """save visualization figures (implemented by subclasses)"""
        pass

    def do_some_clean(self):
        """release references to the per-batch tensors"""
        self.loss = None
        self.gt = None
        self.input = None
        self.output = None

    def save_fig_3D(self, phase=None):
        """
        save 3d output,
        the propose of this function is for visualize the seg performance
        for toolkit based method, they will default save the 3d images, so no need to call this function
        the physical information like origin, orientation is not saved, todo, include this information
        :param phase: train|val|test|debug
        :return:
        """
        if type(self.output) == torch.Tensor:
            output = self.output.detach().cpu().numpy()
        else:
            output = self.output
        if type(self.gt) == torch.Tensor:
            gt = self.gt.detach().cpu().numpy()
        else:
            gt = self.gt
        output = output.astype(np.int32)
        if gt is not None:
            gt = gt.astype(np.int32)
        spacing = self.spacing.cpu().numpy()
        saving_folder_path = os.path.join(self.record_path, '3D')
        make_dir(saving_folder_path)
        num_output = output.shape[0]
        for i in range(num_output):
            appendix = self.fname_list[i] + "_" + phase + "_iter_" + str(self.iter_count)
            saving_file_path = saving_folder_path + '/' + appendix + "_output.nii.gz"
            # bind the sitk images to fresh names; the previous code rebound
            # `output`/reused it for the gt image, which clobbered the batch
            # array and broke every iteration after the first
            output_itk = sitk.GetImageFromArray(output[i, 0, ...])
            output_itk.SetSpacing(np.flipud(spacing[i]))
            sitk.WriteImage(output_itk, saving_file_path)
            if gt is not None:
                saving_file_path = saving_folder_path + '/' + appendix + "_gt.nii.gz"
                gt_itk = sitk.GetImageFromArray(gt[i, 0, ...])
                gt_itk.SetSpacing(np.flipud(spacing[i]))
                sitk.WriteImage(gt_itk, saving_file_path)
| 7,185 | 26.961089 | 152 | py |
easyreg | easyreg-master/easyreg/compare_sym.py | import torch
import numpy as np
import sys,os
import SimpleITK as sitk
from easyreg.net_utils import Bilinear
import ants
from .nifty_reg_utils import nifty_reg_resample
import subprocess
import nibabel as nib
from mermaid.utils import identity_map_multiN
# record_path ='/playpen/zyshen/debugs/compare_sym'
# moving_img_path = os.path.join('/playpen/zyshen/debugs/compare_sym', 'source.nii.gz')
# if not os.path.exists(record_path):
# os.mkdir(record_path)
# dim = 3
# szEx =np.array([80,192,192]) # size of the desired images: (sz)^dim
# I0,spacing = eg.CreateGrid(dim,add_noise_to_bg=False).create_image_single(szEx, None) # create a default image size with two sample squares
# sz = np.array(I0.shape)
# I0 = np.squeeze(I0)
# sitk.WriteImage(sitk.GetImageFromArray(I0),moving_img_path)
#
def init_source_image(record_path):
    """
    write the x/y/z channels of an identity map as separate nifti files.

    :param record_path: folder the identity channel images are written into
    :return: list of the three written file paths, in [x, y, z] order
    """
    img_size = [80, 192, 192]
    spacing = 1. / (np.array(img_size) - 1)
    identity_map = identity_map_multiN([1, 1] + img_size, spacing)
    print(identity_map.shape)
    # one file per coordinate channel; replaces the former copy-pasted triple
    paths = []
    for channel, axis in enumerate(('x', 'y', 'z')):
        pth = os.path.join(record_path, 'identity_{}.nii.gz'.format(axis))
        sitk.WriteImage(sitk.GetImageFromArray(identity_map[0, channel]), pth)
        paths.append(pth)
    return paths
def __inverse_name(name):
    """get the name of the inversed registration pair"""
    return '{}_inverse'.format(name)
def compute_sym_metric(refer, output, shrink_margin=(10,20,20)):
    """mean squared difference between refer and output on the interior region,
    cropping shrink_margin voxels from both ends of each axis"""
    region = tuple(slice(m, -m) for m in shrink_margin)
    diff = refer[region] - output[region]
    return np.mean(diff ** 2)
def sitk_grid_sampling(fixed,moving, displacement,is_label=False):
    """
    resample ``moving`` onto the grid of ``fixed`` under the given sitk transform.

    :param fixed: reference image defining the output grid
    :param moving: image to be warped
    :param displacement: sitk transform to apply
    :param is_label: if True, use nearest-neighbor interpolation (for label maps)
    :return: the resampled sitk image
    """
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed)
    # labels must not be blended, intensities may be interpolated linearly
    interpolator = sitk.sitkNearestNeighbor if is_label else sitk.sitkLinear
    resampler.SetInterpolator(interpolator)
    resampler.SetDefaultPixelValue(0)
    resampler.SetTransform(displacement)
    out = resampler.Execute(moving)
    return out
def cal_ants_sym(record_path,fname,moving_img_path):
    """
    warp the moving image by the saved forward ants transforms and then by the
    inverse-pair transforms; for a perfectly symmetric registration the result
    equals the input image.

    :param record_path: folder holding '<fname>_disp.nii.gz' / '<fname>_affine.mat'
        for both the pair and its '_inverse' counterpart
    :param fname: registration pair name
    :param moving_img_path: image used as both moving and fixed image
    :return: composed warped image as a numpy array (transposed; presumably to
        match sitk's z,y,x axis order -- confirm against the debug lines below)
    """
    inv_fname = __inverse_name(fname)
    disp_pth = [os.path.join(record_path, fname + '_disp.nii.gz'),os.path.join(record_path, fname + '_affine.mat')]
    inv_disp_pth = [os.path.join(record_path, inv_fname + '_disp.nii.gz'),os.path.join(record_path, inv_fname + '_affine.mat')]
    # source1 = sitk.GetArrayFromImage(sitk.ReadImage(moving_img_path))
    # source2 = ants.image_read(moving_img_path).numpy()
    # source2 = np.transpose(source2)
    source = ants.image_read(moving_img_path)
    target = ants.image_read(moving_img_path)
    # forward then inverse transform; both lists hold [displacement, affine]
    output = ants.apply_transforms(fixed=target, moving=source, transformlist=disp_pth)
    output = ants.apply_transforms(fixed=target, moving=output, transformlist=inv_disp_pth)
    output = output.numpy()
    output = np.transpose(output)
    return output
def cal_demons_sym(record_path,fname,moving_img_path):
    """
    compose the forward (nifty-reg affine + demons displacement) and inverse
    transforms on the moving image; for a symmetric method the result
    reproduces the input.

    NOTE(review): runs nifty-reg via subprocess with shell=True on constructed
    paths -- acceptable for trusted local paths, but do not feed untrusted input.

    :param record_path: folder holding the saved '_disp.nii.gz' and '_af.txt'
        files for both the pair and its '_inverse' counterpart
    :param fname: registration pair name
    :param moving_img_path: image used as both moving and fixed image
    :return: composed warped image as a numpy array
    """
    inv_fname = __inverse_name(fname)
    disp_pth = os.path.join(record_path, fname + '_disp.nii.gz')
    inv_disp_pth = os.path.join(record_path, inv_fname + '_disp.nii.gz')
    disp = sitk.ReadImage(disp_pth)
    inv_disp = sitk.ReadImage(inv_disp_pth)
    tx = sitk.DisplacementFieldTransform(disp)
    inv_tx = sitk.DisplacementFieldTransform(inv_disp)
    target = sitk.ReadImage(moving_img_path)
    # forward pass: affine (via nifty-reg resample) then the demons displacement
    af_txt = os.path.join(record_path,fname+'_af.txt')
    af_pth = os.path.join(record_path,'af_output.nii.gz')
    cmd = nifty_reg_resample(ref=moving_img_path, flo=moving_img_path, trans=af_txt, res=af_pth, inter=0)
    process = subprocess.Popen(cmd, shell=True)
    process.wait()
    source = sitk.ReadImage(af_pth)
    output = sitk_grid_sampling(target, source, tx, is_label=False)
    forward_output = os.path.join(record_path,'forward_output.nii.gz')
    sitk.WriteImage(output,forward_output)
    # inverse pass: inverse affine then the inverse displacement
    inv_af_txt = os.path.join(record_path, inv_fname + '_af.txt')
    for_inv_af_pth = os.path.join(record_path,'for_inv_af_output.nii.gz')
    cmd = nifty_reg_resample(ref=moving_img_path, flo=forward_output, trans=inv_af_txt, res=for_inv_af_pth, inter=0)
    process = subprocess.Popen(cmd, shell=True)
    process.wait()
    source = sitk.ReadImage(for_inv_af_pth)
    output = sitk_grid_sampling(target, source, inv_tx, is_label=False)
    output = sitk.GetArrayFromImage(output)
    return output
def cal_demons_sym_new(record_path,fname,moving_img_path):
    """
    simplified demons symmetry check: apply the forward displacement and then
    the inverse-pair displacement (no separate affine step, unlike cal_demons_sym).

    :param record_path: folder holding '<fname>_disp.nii.gz' for the pair and
        its '_inverse' counterpart
    :param fname: registration pair name
    :param moving_img_path: image used as both moving and fixed image
    :return: composed warped image as a numpy array
    """
    inv_fname = __inverse_name(fname)
    disp_pth = os.path.join(record_path, fname + '_disp.nii.gz')
    inv_disp_pth = os.path.join(record_path, inv_fname + '_disp.nii.gz')
    disp = sitk.ReadImage(disp_pth)
    inv_disp = sitk.ReadImage(inv_disp_pth)
    tx = sitk.DisplacementFieldTransform(disp)
    inv_tx = sitk.DisplacementFieldTransform(inv_disp)
    target = sitk.ReadImage(moving_img_path)
    source = sitk.ReadImage(moving_img_path)
    # forward warp, then warp the result back with the inverse transform
    output = sitk_grid_sampling(target, source, tx, is_label=False)
    output = sitk_grid_sampling(target, output, inv_tx, is_label=False)
    output = sitk.GetArrayFromImage(output)
    return output
def cal_nifty_sym(record_path, fname, moving_img_path):
    """
    warp the source by the forward and then the inverse nifty-reg deformation
    and return the composed result (reproduces the source for a symmetric method)
    """
    inv_fname = __inverse_name(fname)
    def_pth = os.path.join(record_path, fname + '_deformation.nii.gz')
    inv_def_pth = os.path.join(record_path, inv_fname + '_deformation.nii.gz')
    output_path = os.path.join(record_path, 'output_sym.nii.gz')
    # first pass warps the moving image, second pass warps that result back
    for trans, flo in ((def_pth, moving_img_path), (inv_def_pth, output_path)):
        cmd = nifty_reg_resample(ref=moving_img_path, flo=flo, trans=trans, res=output_path, inter=0)
        process = subprocess.Popen(cmd, shell=True)
        process.wait()
    warped = sitk.ReadImage(output_path)
    return sitk.GetArrayFromImage(warped)
def cal_mermaid_sym(record_path,fname, moving_img_path):
    """
    compose the forward and inverse mermaid transformation maps on the source
    image via bilinear sampling; for a symmetric method the result reproduces
    the source.

    :param record_path: folder holding '<fname>_phi.nii.gz' for the pair and
        its '_inverse' counterpart
    :param fname: registration pair name
    :param moving_img_path: image used as the source
    :return: composed warped image as a squeezed numpy array
    """
    inv_fname = __inverse_name(fname)
    def_pth = os.path.join(record_path, fname + '_phi.nii.gz')
    inv_def_pth = os.path.join(record_path, inv_fname + '_phi.nii.gz')
    source = sitk.GetArrayFromImage(sitk.ReadImage(moving_img_path))
    # add batch and channel dims: BxCxXxYxZ as Bilinear expects
    source = np.expand_dims(np.expand_dims(source, 0), 0)
    source = torch.Tensor(source)
    phi = nib.load(def_pth).get_fdata()
    inv_phi = nib.load(inv_def_pth).get_fdata()
    phi = torch.Tensor(np.expand_dims(phi,0))
    inv_phi = torch.Tensor(np.expand_dims(inv_phi,0))
    b1 = Bilinear(zero_boundary=True,using_scale=False)
    b2 = Bilinear(zero_boundary=True,using_scale=False)
    # forward warp then warp back with the inverse map
    output = b1(source, phi)
    output = b2(output,inv_phi)
    return np.squeeze(output.detach().numpy())
def cal_sym(opt, dataloaders, task_name=''):
    """
    Evaluate registration symmetry for every test pair: warp each original
    image forward and then backward with the saved transforms and compare the
    round-trip result against the original image.

    :param opt: ParameterDict, task settings (provides the record path and model name)
    :param dataloaders: dict of dataloaders; only the 'test' phase is used
    :param task_name: prefix for the saved per-pair record file
    :return: None; per-pair scores are saved as '<task_name>records_sym.npy'
             and averaged round-trip images as 'avg_image_<i>.nii.gz'
    :raises ValueError: if the configured model has no symmetry routine
    """
    record_path = opt['tsk_set']['path']['record_path']
    model_name = opt['tsk_set']['model']
    orginal_img_path_list = init_source_image(record_path)
    #os.path.join('/playpen/zyshen/debugs/compare_sym', 'source.nii.gz')
    phases = ['test']
    orginal_list = [sitk.GetArrayFromImage(sitk.ReadImage(pth)) for pth in orginal_img_path_list]
    # dispatch table instead of the old if-chain; unknown models used to fall
    # through with cal_sym_func left as None and crash later inside the loop
    # with an opaque "'NoneType' object is not callable" — fail early instead
    sym_func_by_model = {'ants': cal_ants_sym,
                         'demons': cal_demons_sym_new,
                         'nifty_reg': cal_nifty_sym,
                         'reg_net': cal_mermaid_sym,
                         'mermaid_iter': cal_mermaid_sym}
    if model_name not in sym_func_by_model:
        raise ValueError("no symmetry evaluation available for model '{}'".format(model_name))
    cal_sym_func = sym_func_by_model[model_name]
    for phase in phases:
        num_samples = len(dataloaders[phase])
        records_sym_val_np = np.zeros(num_samples)
        sym_val_res = 0.
        # running sum of round-trip images, one accumulator per original image
        avg_output = [0] * len(orginal_list)
        for i, data in enumerate(dataloaders[phase]):
            fname = list(data[1])[0]
            batch_size = len(data[0]['image'])
            extra_res = 0.
            for j, orginal_img_path in enumerate(orginal_img_path_list):
                output = cal_sym_func(record_path, fname, orginal_img_path)
                extra_tmp = compute_sym_metric(orginal_list[j], output)
                extra_res += extra_tmp
                avg_output[j] += output
            sym_val_res += extra_res * batch_size
            records_sym_val_np[i] = extra_res
            print("id {} and current pair name is : {}".format(i, data[1]))
            print('the current sym val is {}'.format(extra_res))
            print('the current average sym val is {}'.format(sym_val_res / (i + 1) / batch_size))
        avg_output = [res / len(dataloaders[phase].dataset) for res in avg_output]
        for i, res in enumerate(avg_output):
            sitk.WriteImage(sitk.GetImageFromArray(res), os.path.join(record_path, 'avg_image_' + str(i) + '.nii.gz'))
        sym_val_res = sym_val_res / len(dataloaders[phase].dataset)
        print("the average {}_ sym val: {} :".format(phase, sym_val_res))
        np.save(os.path.join(record_path, task_name + 'records_sym'), records_sym_val_np)
| 9,171 | 39.764444 | 142 | py |
easyreg | easyreg-master/easyreg/mermaid_iter.py | from .base_mermaid import MermaidBase
from .utils import *
import mermaid.utils as py_utils
import mermaid.simple_interface as SI
class MermaidIter(MermaidBase):
    """Optimization-based (iterative) registration driven by the mermaid toolkit.

    Each image pair is registered by direct optimization (optionally an affine
    stage followed by a non-parametric stage); no network training is involved.
    """

    def name(self):
        """Return the identifier of this registration approach."""
        return 'mermaid-iter'

    def initialize(self,opt):
        """
        Initialize registration settings from the task option.

        :param opt: ParameterDict, task settings
        :return:
        """
        MermaidBase.initialize(self,opt)
        method_name =opt['tsk_set']['method_name']
        # 'affine'    : affine stage only
        # 'nonp'      : affine stage followed by a non-parametric stage
        # 'nonp_only' : non-parametric stage without affine pre-alignment
        # NOTE(review): any other method_name leaves affine_on/nonp_on unset and
        # forward() will raise AttributeError — confirm upstream validation
        if method_name =='affine':
            self.affine_on = True
            self.nonp_on = False
        elif method_name =='nonp':
            self.affine_on = True
            self.nonp_on = True
        elif method_name=='nonp_only':
            self.affine_on = False
            self.nonp_on = True
        self.si = SI.RegisterImagePair()
        self.opt_optim = opt['tsk_set']['optim']
        self.compute_inverse_map = opt['tsk_set']['reg'][('compute_inverse_map', False,"compute the inverse transformation map")]
        # NOTE(review): relies on self.opt having been set by MermaidBase.initialize — confirm
        self.opt_mermaid= self.opt['tsk_set']['reg']['mermaid_iter']
        self.use_init_weight = self.opt_mermaid[('use_init_weight',False,'whether to use init weight for RDMM registration')]
        self.init_weight = None
        self.setting_for_mermaid_affine = self.opt_mermaid[('mermaid_affine_json','','the json path for the setting for mermaid affine')]
        self.setting_for_mermaid_nonp = self.opt_mermaid[('mermaid_nonp_json','','the json path for the setting for mermaid non-parametric')]
        # peek into the non-parametric json to learn which mermaid model will run
        # NOTE(review): `pars` is not imported in this file explicitly; presumably
        # brought in by `from .utils import *` — confirm
        nonp_settings = pars.ParameterDict()
        nonp_settings.load_JSON(self.setting_for_mermaid_nonp)
        self.nonp_model_name = nonp_settings['model']['registration_model']['type']
        self.weights_for_fg = self.opt_mermaid[('weights_for_fg',[0,0,0,0,1.],'regularizer weight for the foregound area, this should be got from the mermaid_json file')]
        self.weights_for_bg = self.opt_mermaid[('weights_for_bg',[0,0,0,0,1.],'regularizer weight for the background area')]
        # the json settings are copied into the record folder lazily, once
        self.saved_mermaid_setting_path = None
        self.saved_affine_setting_path = None
        self.inversed_map = None
        # images are normalized into [0,1] by set_input
        self.use_01 = True

    def set_input(self, data, is_train=True):
        """
        Unpack one batch and cache images / labels / meta info on the instance.

        :param data: (data_dict, name_list) pair coming from the dataloader
        :param is_train: unused here; kept for interface consistency
        """
        # input intensities are in [-1,1]; map them to [0,1] for mermaid
        data[0]['image'] =(data[0]['image'].cuda()+1)/2
        if 'label' in data[0]:
            data[0]['label'] =data[0]['label'].cuda()
        moving, target, l_moving,l_target = get_reg_pair(data[0])
        input = data[0]['image']
        self.input_img_sz = list(moving.shape)[2:]
        self.original_spacing = data[0]['original_spacing']
        self.original_im_sz = data[0]['original_sz']
        # either the physical spacing or a normalized 1/(sz-1) spacing
        self.spacing = data[0]['spacing'][0] if self.use_physical_coord else 1. / (np.array(self.input_img_sz) - 1)
        self.spacing = np.array(self.spacing) if type(self.spacing) is not np.ndarray else self.spacing
        self.moving = moving
        self.target = target
        self.l_moving = l_moving
        self.l_target = l_target
        self.input = input
        self.fname_list = list(data[1])
        self.pair_path = data[0]['pair_path']

    def affine_optimization(self):
        """
        call affine optimization registration in mermaid
        :return: warped image, transformation map, affine parameter, loss(None)
        """
        # fresh interface per pair so earlier optimization state cannot leak in
        self.si = SI.RegisterImagePair()
        extra_info = pars.ParameterDict()
        extra_info['pair_name'] = self.fname_list
        af_sigma = self.opt_mermaid['affine']['sigma']
        self.si.opt = None
        self.si.set_initial_map(None)
        if self.saved_affine_setting_path is None:
            # copy the affine json into the record folder once, then reuse it
            self.saved_affine_setting_path = self.save_setting(self.setting_for_mermaid_affine,self.record_path,'affine_setting.json')
        cur_affine_json_saving_path =(os.path.join(self.record_path,'cur_settings_affine.json'),os.path.join(self.record_path,'cur_settings_affine_comment.json'))
        self.si.register_images(self.moving, self.target, self.spacing,extra_info=extra_info,LSource=self.l_moving,LTarget=self.l_target,
                                visualize_step=None,
                                use_multi_scale=True,
                                rel_ftol=0,
                                similarity_measure_sigma=af_sigma,
                                json_config_out_filename=cur_affine_json_saving_path,
                                params =self.saved_affine_setting_path)
        self.output = self.si.get_warped_image()
        self.phi = self.si.opt.optimizer.ssOpt.get_map()
        self.phi = self.phi.detach().clone()
        # for i in range(self.dim):
        #     self.phi[:, i, ...] = self.phi[:, i, ...] / ((self.input_img_sz[i] - 1) * self.spacing[i])
        Ab = self.si.opt.optimizer.ssOpt.model.Ab
        if self.compute_inverse_map:
            # invert the affine parameters and apply them to an identity grid
            inv_Ab = py_utils.get_inverse_affine_param(Ab.detach())
            identity_map = py_utils.identity_map_multiN([1, 1] + self.input_img_sz, self.spacing)
            self.inversed_map = py_utils.apply_affine_transform_to_map_multiNC(inv_Ab, torch.Tensor(identity_map).cuda())
            self.inversed_map = self.inversed_map.detach()
        self.afimg_or_afparam = Ab
        save_affine_param_with_easyreg_custom(self.afimg_or_afparam,self.record_path,self.fname_list,affine_compute_from_mermaid=True)
        return self.output.detach_(), self.phi.detach_(), self.afimg_or_afparam.detach_(), None

    def nonp_optimization(self):
        """
        call non-parametric image registration in mermaid
        if the affine registration is performed first, the affine transformation map would be taken as the initial map
        if the init weight on mutli-gaussian regularizer are set, the initial weight map would be computed from the label map, make sure the model called support spatial variant regularizer
        :return: warped image, transformation map, affined image, loss(None)
        """
        affine_map = None
        if self.affine_on:
            # reuse the map optimized by the affine stage as the initialization
            affine_map = self.si.opt.optimizer.ssOpt.get_map()
        self.si = SI.RegisterImagePair()
        extra_info = pars.ParameterDict()
        extra_info['pair_name'] = self.fname_list
        self.si.opt = None
        if affine_map is not None:
            self.si.set_initial_map(affine_map.detach(), self.inversed_map)
            if self.use_init_weight:
                # derive a spatially-variant regularizer weight map from the
                # moving label and warp it into the affine-aligned space
                init_weight = get_init_weight_from_label_map(self.l_moving, self.spacing,self.weights_for_bg,self.weights_for_fg)
                init_weight = py_utils.compute_warped_image_multiNC(init_weight,affine_map,self.spacing,spline_order=1,zero_boundary=False)
                self.si.set_weight_map(init_weight.detach(), freeze_weight=True)
        if self.saved_mermaid_setting_path is None:
            self.saved_mermaid_setting_path = self.save_setting(self.setting_for_mermaid_nonp,self.record_path,"nonp_setting.json")
        cur_mermaid_json_saving_path =(os.path.join(self.record_path,'cur_settings_nonp.json'),os.path.join(self.record_path,'cur_settings_nonp_comment.json'))
        self.si.register_images(self.moving, self.target, self.spacing, extra_info=extra_info, LSource=self.l_moving,
                                LTarget=self.l_target,
                                visualize_step=None,
                                use_multi_scale=True,
                                rel_ftol=0,
                                compute_inverse_map=self.compute_inverse_map,
                                json_config_out_filename=cur_mermaid_json_saving_path,
                                params=self.saved_mermaid_setting_path)
        self.afimg_or_afparam = self.output  # here return the affine image
        self.output = self.si.get_warped_image()
        self.phi = self.si.opt.optimizer.ssOpt.get_map()
        # for i in range(self.dim):
        #     self.phi[:,i,...] = self.phi[:,i,...]/ ((self.input_img_sz[i]-1)*self.spacing[i])
        if self.compute_inverse_map:
            self.inversed_map = self.si.get_inverse_map().detach()
        return self.output.detach_(), self.phi.detach_(), self.afimg_or_afparam.detach_() if self.afimg_or_afparam is not None else None, None

    def save_setting(self,path, output_path,fname='mermaid_setting.json'):
        """
        save the mermaid settings into task record folder
        :param path: path of mermaid setting file
        :param output_path: path of task record folder
        :param fname: saving name
        :return: saved setting path
        """
        params = pars.ParameterDict()
        params.load_JSON(path)
        os.makedirs(output_path, exist_ok=True)
        output_path = os.path.join(output_path, fname)
        params.write_JSON(output_path, save_int=False)
        return output_path

    def save_image_into_original_sz_with_given_reference(self):
        """
        save the image into original sz (the sz before resampling) and with the original physical settings, i.e. spacing, origin, orientation
        :return:
        """
        # the original image sz in one batch should be the same
        self._save_image_into_original_sz_with_given_reference(self.pair_path,self.phi, inverse_phis=self.inversed_map, use_01=self.use_01)

    def forward(self,input=None):
        """Run the configured registration stage(s) and return their results.

        :param input: unused; kept for interface consistency with network models
        """
        if self.affine_on and not self.nonp_on:
            return self.affine_optimization()
        elif self.affine_on and self.nonp_on:
            # the affine result initializes the non-parametric stage
            self.affine_optimization()
            return self.nonp_optimization()
        else:
            return self.nonp_optimization()

    def cal_val_errors(self):
        """Compute validation metrics (identical to the test metrics here)."""
        self.cal_test_errors()

    def cal_test_errors(self):
        """Compute the evaluation metrics for the current pair."""
        self.get_evaluation()

    def get_jacobi_val(self):
        """
        :return: the sum of absolute value of negative determinant jacobi, the num of negative determinant jacobi voxels
        """
        return self.jacobi_val

    def get_the_jacobi_val(self):
        """Alias of get_jacobi_val, kept for interface compatibility."""
        return self.jacobi_val

    def __get_adaptive_smoother_map(self):
        r"""
        get the adaptive smoother weight map from spatial-variant regualrizer model
        supported weighting type 'sqrt_w_K_sqrt_w' and 'w_K_w'
        for weighting type == 'w_k_w'
        :math:`\sigma^{2}(x)=\sum_{i=0}^{N-1} w^2_{i}(x) \sigma_{i}^{2}`
        for weighting type = 'sqrt_w_K_sqrt_w'
        :math:`\sigma^{2}(x)=\sum_{i=0}^{N-1} w_{i}(x) \sigma_{i}^{2}`
        :return: adapative smoother weight map :math:`\sigma`
        """
        model = self.si.opt.optimizer.ssOpt.model
        smoother = self.si.opt.optimizer.ssOpt.model.smoother
        adaptive_smoother_map = model.local_weights.detach()
        gaussian_weights = smoother.get_gaussian_weights().detach()
        print(" the current global gaussian weight is {}".format(gaussian_weights))
        gaussian_stds = smoother.get_gaussian_stds().detach()
        print(" the current global gaussian stds is {}".format(gaussian_stds))
        # broadcast the per-channel stds over the spatial dimensions
        view_sz = [1] + [len(gaussian_stds)] + [1] * len(self.spacing)
        gaussian_stds = gaussian_stds.view(*view_sz)
        weighting_type = smoother.weighting_type
        if weighting_type == 'w_K_w':
            adaptive_smoother_map = adaptive_smoother_map**2 # todo this is only necessary when we use w_K_W
        smoother_map = adaptive_smoother_map*(gaussian_stds**2)
        smoother_map = torch.sqrt(torch.sum(smoother_map,1,keepdim=True))
        #_,smoother_map = torch.max(adaptive_smoother_map.detach(),dim=1,keepdim=True)
        self._display_stats(smoother_map.float(),'statistic for weighted smoother map')
        return smoother_map

    def __get_momentum(self):
        """Return the optimized momentum field of the registration model."""
        param = self.si.get_model_parameters()
        return param['m'].detach()

    def _display_stats(self, Ia, iname):
        """
        statistic analysis on variable
        :param Ia: the input variable
        :param iname: variable name
        :return:
        """
        Ia_min = Ia.min().detach().cpu().numpy()
        Ia_max = Ia.max().detach().cpu().numpy()
        Ia_mean = Ia.mean().detach().cpu().numpy()
        Ia_std = Ia.std().detach().cpu().numpy()
        print('{}:the: [min {:.2f},mean {:.2f},max {:.2f}](std {:.2f})'.format(iname, Ia_min,Ia_mean,Ia_max,Ia_std))

    def get_extra_to_plot(self):
        """
        plot extra image, i.e. the initial weight map of rdmm model
        :return: (tensor, title) pair, or (None, None) when there is nothing to plot
        """
        if self.nonp_on:
            if self.nonp_model_name=='lddmm_adapt_smoother_map':
                return self.__get_adaptive_smoother_map(), 'inital_weight'
            else:
                return self.__get_momentum(), "Momentum"
        else:
            return None, None

    def set_val(self):
        """Switch to validation mode (iterative models never train)."""
        self.is_train = False

    def set_debug(self):
        """Switch to debug mode (iterative models never train)."""
        self.is_train = False

    def set_test(self):
        """Switch to test mode (iterative models never train)."""
        self.is_train = False
| 12,862 | 41.3125 | 189 | py |
easyreg | easyreg-master/easyreg/data_manager.py | from easyreg.reg_data_utils import *
from torchvision import transforms
import torch
from easyreg import reg_data_loader_onfly as reg_loader_of
from easyreg import seg_data_loader_onfly as seg_loader_of
from easyreg.reg_data_loader_onfly import ToTensor
# todo reformat the import style
class DataManager(object):
    """Manage dataset preprocessing (currently disabled) and pytorch dataloader creation."""

    def __init__(self, task_name, dataset_name):
        """
        the class for data management
        including two part: 1. preprocess data (disabled) 2. set dataloader
        todo the preprocess data is disabled for the current version, so the data should be prepared ahead.
        1. preprocess data, currently support lpba, ibsr, oasis2d, cumc, oai
        the data will be saved in output_path/auto_generated_name/train|val|test|debug
        2. dataloader, pytorch multi-thread dataloader
        return a dict, each train/val/test/debug phase has its own dataloader
        The path is organized as output_path/task_name with regard to data processing/ train|val|test|debug
        :param task_name: the name can help recognize the data
        :param dataset_name: the name of the dataset
        note: sched ('inter' or 'intra') is configured separately via set_sched;
        it only applies when the dataset includes inter- and intra-personal pairs
        """
        self.task_name = task_name
        """name for task"""
        self.task_type = None
        """" the type of task 'reg','seg'(disabled)'"""
        self.full_task_name = None
        """name of the output folder"""
        self.dataset_name = dataset_name
        """ name of the dataset i.e. lpba, ibsr, oasis """
        self.sched = ''
        """reg: inter, intra seg:'patched' 'nopatched'"""
        self.data_path = None
        """path of the dataset"""
        self.output_path= None
        """path of the processed data"""
        self.task_root_path = None
        """output_path/full_task_name, or can be set manual"""
        self.label_path = None
        """path of the labels of the dataset"""
        self.divided_ratio = [0.7,0.1,0.2]
        """divided data into train, val, and test sets"""
        self.dataset = None
        self.task_path =None
        """train|val|test: dic task_root_path/train|val|test"""
        self.seg_option = None
        """ settings for seg task"""
        self.reg_option = None
        """ settings for reg task"""
        self.phases =['train','val','test','debug']
        """ phases, the debug here refers to the subtraining set to check overfitting"""

    def set_task_type(self,task_type):
        """ set task type, 'reg' or 'seg' """
        self.task_type = task_type

    def set_data_path(self, data_path):
        """ set data path, here refers to the folder path of dataset"""
        self.data_path = data_path

    def set_output_path(self,output_path):
        """ set the output path for propocessed data"""
        self.output_path = output_path

    def set_label_path(self, label_path):
        """set the label path, here refers to the folder path of labels"""
        self.label_path = label_path

    def set_sched(self,sched):
        """ set the sched, can be 'inter','intra'"""
        self.sched = sched

    def set_divided_ratio(self,divided_ratio):
        """ set the divide raito, the divide ratio of the dataset with regard to train, val and test"""
        self.divided_ratio = divided_ratio

    def set_full_task_name(self, full_task_name):
        """
        todo disabled for the cur version
        the task name that combined settings
        :param full_task_name:
        :return:
        """
        self.full_task_name = full_task_name

    def set_reg_option(self,option):
        """ set the registrion settings"""
        self.reg_option = option

    def set_seg_option(self,option):
        """ set the registrion settings"""
        self.seg_option = option

    def get_data_path(self):
        """ return the data path"""
        return self.data_path

    def get_full_task_name(self):
        """ return the full task name"""
        return os.path.split(self.task_root_path)[1]

    def generate_saving_path(self, auto=True):
        """set task_root_path to output_path/task_name (the *auto* flag is currently unused)"""
        self.task_root_path = os.path.join(self.output_path,self.task_name)

    def generate_task_path(self):
        """ the saving path for proprocessed data is output_path/task_name/train|val|test|debug"""
        self.task_path = {x:os.path.join(self.task_root_path,x) for x in ['train','val', 'test','debug']}
        return self.task_path

    def get_task_root_path(self):
        """ return the task root path, refers to output_path/task_name"""
        return self.task_root_path

    def manual_set_task_root_path(self, task_root_path):
        """
        if switch the task into existed task, this is the only setting need to be set
        :param task_path: given existed task_root_path
        :return:
        """
        self.task_root_path = task_root_path
        self.task_path = {x:os.path.join(task_root_path,x) for x in ['train','val', 'test','debug']}
        return self.task_path

    def init_dataset(self):
        """ preprocess the dataset"""
        if self.task_type == 'reg':
            self.init_reg_dataset()
        else:
            raise(ValueError,"not implemented")

    def init_reg_dataset(self):
        """ preprocess the registration dataset"""
        import data_pre.reg_data_pool as reg_pool
        # NOTE(review): self.full_comb is never initialized in __init__, so this
        # raises AttributeError unless a caller sets it beforehand — confirm
        self.dataset = reg_pool.RegDatasetPool().create_dataset(self.dataset_name,self.sched, self.full_comb)
        self.dataset.set_data_path(self.data_path)
        self.dataset.set_output_path(self.task_root_path)
        self.dataset.set_divided_ratio(self.divided_ratio)

    def prepare_data(self):
        """
        preprocess data into h5py
        :return:
        """
        self.dataset.prepare_data()

    def init_dataset_type(self):
        """choose the dataset class that matches the task type ('reg' or 'seg')"""
        self.cur_dataset = reg_loader_of.RegistrationDataset if self.task_type=='reg' else seg_loader_of.SegmentationDataset

    def init_dataset_loader(self,transformed_dataset,batch_size):
        """
        initialize the data loaders: set work number, set work type( shuffle for trainning, order for others)
        :param transformed_dataset: dict of dataset objects keyed by phase
        :param batch_size: the batch size of each iteration, int or per-phase list
        :return: dict of dataloaders for train|val|test|debug
        """
        def _init_fn(worker_id):
            # fixed per-worker seed so multi-worker loading stays reproducible
            np.random.seed(12 + worker_id)
        num_workers_reg ={'train':16,'val':0,'test':0,'debug':0}#{'train':0,'val':0,'test':0,'debug':0}#{'train':8,'val':4,'test':4,'debug':4}
        shuffle_list ={'train':True,'val':False,'test':False,'debug':False}
        # a scalar batch_size is broadcast to all four phases
        batch_size = [batch_size]*4 if not isinstance(batch_size, list) else batch_size
        batch_size = {'train': batch_size[0],'val':batch_size[1],'test':batch_size[2],'debug':batch_size[3]}
        dataloaders = {x: torch.utils.data.DataLoader(transformed_dataset[x], batch_size=batch_size[x],
                                                      shuffle=shuffle_list[x], num_workers=num_workers_reg[x],worker_init_fn=_init_fn) for x in self.phases}
        return dataloaders

    def data_loaders(self, batch_size=20,is_train=True):
        """
        get the data_loaders for the train phase and the test phase
        :param batch_size: the batch size for each iteration
        :param is_train: in train mode or not
        :return: dict of dataloaders for train phase or the test phase
        """
        if is_train:
            self.phases = ['train', 'val','debug']
        else:
            self.phases = ['test']
        composed = transforms.Compose([ToTensor()])
        self.init_dataset_type()
        option = self.seg_option if self.task_type=="seg" else self.reg_option
        transformed_dataset = {x: self.cur_dataset(data_path=self.task_path[x],phase=x,transform=composed,option=option) for x in self.phases}
        dataloaders = self.init_dataset_loader(transformed_dataset, batch_size)
        # expose per-phase sizes and pair names alongside the loaders
        dataloaders['data_size'] = {x: len(dataloaders[x]) for x in self.phases}
        dataloaders['info'] = {x: transformed_dataset[x].name_list for x in self.phases}
        print('dataloader is ready')
        return dataloaders
if __name__ == "__main__":
    # ad-hoc smoke test: build a segmentation data manager against a local
    # debug task and iterate the training loader once
    from tools.module_parameters import ParameterDict
    prepare_data = True  # NOTE(review): unused below — confirm whether preprocessing was meant to run
    task_root_path = '/home/zyshen/proj/local_debug/brain_seg'
    task_type = 'seg'  # NOTE(review): unused; set_task_type is called with a literal instead
    dataset_name = 'lpba'
    task_name = 'debugging'
    settings = ParameterDict()
    settings.load_JSON('/home/zyshen/proj/easyreg/debug/settings/data_setting.json')
    seg_option = settings["datapro"]
    data_manager = DataManager(task_name, dataset_name)
    data_manager.set_task_type('seg')
    data_manager.manual_set_task_root_path(task_root_path)
    data_manager.generate_task_path()
    data_manager.seg_option = seg_option
    dataloaders = data_manager.data_loaders(batch_size=3)
    for data in dataloaders['train']:
        pass
| 8,907 | 39.126126 | 152 | py |
easyreg | easyreg-master/easyreg/reg_data_loader_onfly.py | from __future__ import print_function, division
import blosc
import torch
from torch.utils.data import Dataset
from .reg_data_utils import *
import SimpleITK as sitk
from multiprocessing import *
blosc.set_nthreads(1)
import progressbar as pb
class RegistrationDataset(Dataset):
"""registration dataset."""
    def __init__(self, data_path,phase=None, transform=None, option=None):
        """
        the dataloader for registration task, to avoid frequent disk communication, all pairs are compressed into memory
        :param data_path: string, path to the data
               the data should be preprocessed and saved into txt
        :param phase: string, 'train'/'val'/ 'test'/ 'debug' , debug here means a subset of train data, to check if model is overfitting
        :param transform: function, apply transform on data
        :param option: ParameterDict, settings for the registration dataloader
        """
        self.data_path = data_path
        self.phase = phase
        self.transform = transform
        #self.data_type = '*.nii.gz'
        self.inverse_reg_direction = option[('inverse_reg_direction',False,'double the data via inverse registration order')]
        """ inverse the registration order, i.e the original set is A->B, the new set would be A->B and B->A """
        # pick the loading cap belonging to the current phase
        ind = ['train', 'val', 'test', 'debug'].index(phase)
        max_num_for_loading=option['max_num_for_loading',(-1,-1,-1,-1),"the max number of pairs to be loaded, set -1 if there is no constraint,[max_train, max_val, max_test, max_debug]"]
        self.max_num_for_loading = max_num_for_loading[ind]
        """ the max number of pairs to be loaded into the memory,[max_train, max_val, max_test, max_debug]"""
        self.load_init_weight=option[('load_init_weight',False,'load init weight for adaptive weighting model')]
        self.get_file_list()
        self.reg_option = option
        self.img_after_resize = option[('img_after_resize',[-1,-1,-1],"resample the image into desired size")]
        # any -1 in the target size means "keep the original size"
        self.img_after_resize = None if any([sz == -1 for sz in self.img_after_resize]) else self.img_after_resize
        self.normalize_via_percentage_clip = option[(
            'normalize_via_percentage_clip', -1, "normalize the image via percentage clip, the given value is in [0-1]")]
        self.normalize_via_range_clip = option[
            ('normalize_via_range_clip', (-1, -1), "normalize the image via range clip")]
        load_training_data_into_memory = option[('load_training_data_into_memory',False,"when train network, load all training sample into memory can relieve disk burden")]
        # only the training phase ever caches the whole set in memory
        self.load_into_memory = load_training_data_into_memory if phase == 'train' else False
        self.pair_list = []
        self.original_spacing_list = []
        self.original_sz_list = []
        self.spacing_list = []
        if self.load_into_memory:
            self.init_img_pool()
    def get_file_list(self):
        """
        Build self.path_list / self.name_list (and optionally self.init_weight_list)
        from the txt files saved under data_path; optionally double the set by
        adding the inverse registration direction for each pair.
        :return:
        """
        if not os.path.exists(self.data_path):
            # the phase folder may not exist (e.g. no 'debug' split); stay empty
            self.path_list=[]
            self.name_list=[]
            self.init_weight_list=[]
            return
        self.path_list = read_txt_into_list(os.path.join(self.data_path,'pair_path_list.txt'))
        pair_name_path = os.path.join(self.data_path, 'pair_name_list.txt')
        if os.path.isfile(pair_name_path):
            self.name_list = read_fname_list_from_pair_fname_txt(pair_name_path)
        else:
            # no name file yet: derive names from the image paths and cache them
            # NOTE(review): assumes generate_pair_name returns a sequence whose
            # first element is the pair name — confirm
            name_list = [generate_pair_name([self.path_list[i][0],self.path_list[i][1]]) for i in range(len(self.path_list))]
            write_list_into_txt(name_list,pair_name_path)
            self.name_list =[name[0] for name in name_list]
        if self.load_init_weight:
            self.init_weight_list = read_txt_into_list(os.path.join(self.data_path,'pair_weight_path_list.txt'))
        # if len(self.path_list[0])==4:
        #     self.has_label=True
        read_num = min(self.max_num_for_loading, len(self.path_list))
        if self.max_num_for_loading>0:
            # truncate all parallel lists consistently
            self.path_list = self.path_list[:read_num]
            self.name_list = self.name_list[:read_num]
            if self.load_init_weight:
                self.init_weight_list = self.init_weight_list[:read_num]
        if self.inverse_reg_direction and (self.phase=='train' or self.phase == 'test'):
            # append every pair again with source/target (and labels) swapped
            path_list_inverse = []
            for pair_path in self.path_list:
                if len(pair_path)==4:
                    path_list_inverse.append([pair_path[1],pair_path[0], pair_path[3], pair_path[2]])
                else:
                    path_list_inverse.append([pair_path[1], pair_path[0]])
            try:
                # prefer names recorded in the txt (detail mode yields the source/target parts)
                s_t_name_list = read_fname_list_from_pair_fname_txt(pair_name_path,detail=True)[:read_num]
                name_list_inverse = [s_t[2]+"_"+s_t[1] for s_t in s_t_name_list]
            except:
                # fall back to simply tagging the forward name with '_inverse'
                name_list_inverse = [self.__inverse_name(name) for name in self.name_list]
            self.path_list += path_list_inverse
            self.name_list += name_list_inverse
            if self.load_init_weight:
                init_weight_inverse =[[path[1],path[0]] for path in self.init_weight_list]
                self.init_weight_list += init_weight_inverse
        # if len(self.name_list)==0:
        #     self.name_list = ['pair_{}'.format(idx) for idx in range(len(self.path_list))]
    def __read_img_label_into_zipnp(self,img_label_path_dic,img_label_dic):
        """
        Worker routine: read, resize and blosc-compress every image (and label)
        in img_label_path_dic and store the result in the shared dict
        img_label_dic, keyed by file name.

        :param img_label_path_dic: {fname: {'img': path, 'label': path(optional)}}
        :param img_label_dic: multiprocessing.Manager dict collecting the results
        """
        pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=len(img_label_path_dic)).start()
        count = 0
        for fn, img_label_path in img_label_path_dic.items():
            img_label_np_dic = {}
            img_sitk, original_spacing, original_sz = self.__read_and_clean_itk_info(img_label_path['img'])
            resized_img, resize_factor = self.resize_img(img_sitk)
            img_np = sitk.GetArrayFromImage(resized_img)
            # blosc compression keeps the in-memory pool small
            img_label_np_dic['img'] = blosc.pack_array(img_np.astype(np.float32))
            if 'label' in img_label_path:
                label_sitk, _, _ = self.__read_and_clean_itk_info(img_label_path['label'])
                resized_label,_ = self.resize_img(label_sitk,is_label=True)
                label_np = sitk.GetArrayFromImage(resized_label)
                img_label_np_dic['label'] = blosc.pack_array(label_np.astype(np.float32))
            img_after_resize = self.img_after_resize if self.img_after_resize is not None else original_sz
            # spacing after resampling keeps the physical extent unchanged
            new_spacing= original_spacing*(original_sz-1)/(np.array(img_after_resize)-1)
            normalized_spacing = self._normalize_spacing(new_spacing,img_after_resize, silent_mode=True)
            img_label_np_dic['original_sz'] =original_sz
            img_label_np_dic['original_spacing'] = original_spacing
            img_label_np_dic['spacing'] = normalized_spacing
            img_label_dic[fn] =img_label_np_dic
            count +=1
            pbar.update(count)
        pbar.finish()
def _normalize_spacing(self,spacing,sz,silent_mode=False):
"""
Normalizes spacing.
:param spacing: Vector with spacing info, in XxYxZ format
:param sz: size vector in XxYxZ format
:return: vector with normalized spacings in XxYxZ format
"""
dim = len(spacing)
# first determine the largest extent
current_largest_extent = -1
extent = np.zeros_like(spacing)
for d in range(dim):
current_extent = spacing[d]*(sz[d]-1)
extent[d] = current_extent
if current_extent>current_largest_extent:
current_largest_extent = current_extent
scalingFactor = 1./current_largest_extent
normalized_spacing = spacing*scalingFactor
normalized_extent = extent*scalingFactor
if not silent_mode:
print('Normalize spacing: ' + str(spacing) + ' -> ' + str(normalized_spacing))
print('Normalize spacing, extent: ' + str(extent) + ' -> ' + str(normalized_extent))
return normalized_spacing
    def init_img_pool(self):
        """img pool should include the following things:
        img_label_path_dic:{img_name:{'img':img_fp,'label':label_fp,...}
        img_label_dic: {img_name:{'img':img_np,'label':label_np},......}
        pair_name_list:[[pair1_s,pair1_t],[pair2_s,pair2_t],....]
        pair_list [[s_np,t_np,sl_np,tl_np],....]
        only the pair_list needs to be used by the get_item method
        """
        manager = Manager()
        img_label_dic = manager.dict()
        img_label_path_dic = {}
        pair_name_list = []
        # collect the unique image (and label) paths referenced by any pair
        for fps in self.path_list:
            has_label = len(fps)==4
            for i in range(2):
                fp = fps[i]
                fn = get_file_name(fp)
                if fn not in img_label_path_dic:
                    if has_label:
                        img_label_path_dic[fn] = {'img':fps[i], 'label':fps[i+2]}
                    else:
                        img_label_path_dic[fn] = {'img':fps[i]}
            pair_name_list.append([get_file_name(fps[0]), get_file_name(fps[1])])
        # load in parallel; each worker fills the shared manager dict
        num_of_workers = 12
        num_of_workers = num_of_workers if len(self.name_list)>12 else 2
        split_dict = self.__split_dict(img_label_path_dic,num_of_workers)
        procs =[]
        for i in range(num_of_workers):
            p = Process(target=self.__read_img_label_into_zipnp,args=(split_dict[i], img_label_dic,))
            p.start()
            print("pid:{} start:".format(p.pid))
            procs.append(p)
        for p in procs:
            p.join()
        print("the loading phase finished, total {} img and labels have been loaded".format(len(img_label_dic)))
        img_label_dic=dict(img_label_dic)
        # assemble per-pair entries; meta info is taken from the source image
        for pair_name in pair_name_list:
            sn = pair_name[0]
            tn = pair_name[1]
            if 'label' in img_label_dic[sn]:
                self.pair_list.append([img_label_dic[sn]['img'],img_label_dic[tn]['img'],
                                       img_label_dic[sn]['label'],img_label_dic[tn]['label']])
            else:
                self.pair_list.append([img_label_dic[sn]['img'], img_label_dic[tn]['img']])
            self.original_spacing_list.append(img_label_dic[sn]['original_spacing'])
            self.original_sz_list.append(img_label_dic[sn]['original_sz'])
            self.spacing_list.append(img_label_dic[sn]['spacing'])
    def resize_img(self, img, is_label=False):
        """
        Resample a sitk image to self.img_after_resize (no-op when that is None).

        :param img: sitk image input
        :param is_label: if True, use nearest-neighbor so label values stay discrete
        :return: (resampled sitk image, per-axis resize factor)
        """
        img_sz = img.GetSize()
        if self.img_after_resize is not None:
            img_after_resize = self.img_after_resize
        else:
            img_after_resize = np.flipud(img_sz)
        # NOTE(review): the flipud calls suggest img_after_resize and GetSize()
        # use opposite axis orders (array vs sitk native) — confirm
        resize_factor = np.array(img_after_resize)/np.flipud(img_sz)
        spacing_factor = (np.array(img_after_resize)-1)/(np.flipud(img_sz)-1)
        resize = not all([factor == 1 for factor in resize_factor])
        if resize:
            resampler= sitk.ResampleImageFilter()
            dimension =3
            factor = np.flipud(resize_factor)
            affine = sitk.AffineTransform(dimension)
            matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
            after_size = [round(img_sz[i]*factor[i]) for i in range(dimension)]
            after_size = [int(sz) for sz in after_size]
            # pure scaling transform: diagonal holds the inverse spacing factor
            matrix[0, 0] =1./ spacing_factor[0]
            matrix[1, 1] =1./ spacing_factor[1]
            matrix[2, 2] =1./ spacing_factor[2]
            affine.SetMatrix(matrix.ravel())
            resampler.SetSize(after_size)
            resampler.SetTransform(affine)
            if is_label:
                resampler.SetInterpolator(sitk.sitkNearestNeighbor)
            else:
                resampler.SetInterpolator(sitk.sitkBSpline)
            img_resampled = resampler.Execute(img)
        else:
            img_resampled = img
        return img_resampled, resize_factor
def normalize_intensity(self, img):
"""
a numpy image, normalize into intensity [-1,1]
(img-img.min())/(img.max() - img.min())
:param img: image
:param percen_clip: Linearly normalized image intensities so that the 95-th percentile gets mapped to 0.95; 0 stays 0
:param range_clip: Linearly normalized image intensities from (range_clip[0], range_clip[1]) to 0,1
:return
"""
if self.normalize_via_percentage_clip > 0:
img = img - img.min()
normalized_img = img / np.percentile(img, 95) * 0.95
else:
range_clip = self.normalize_via_range_clip
if range_clip[0] < range_clip[1]:
img = np.clip(img, a_min=range_clip[0], a_max=range_clip[1])
min_intensity = img.min()
max_intensity = img.max()
normalized_img = (img - img.min()) / (max_intensity - min_intensity)
normalized_img = normalized_img * 2 - 1
return normalized_img
def __read_and_clean_itk_info(self,path):
if path is not None:
img = sitk.ReadImage(path)
spacing_sitk = img.GetSpacing()
img_sz_sitk = img.GetSize()
return sitk.GetImageFromArray(sitk.GetArrayFromImage(img)), np.flipud(spacing_sitk), np.flipud(img_sz_sitk)
else:
return None, None, None
def __read_itk_into_np(self,path):
return sitk.GetArrayFromImage(sitk.ReadImage(path))
def __split_dict(self,dict_to_split,split_num):
index_list = list(range(len(dict_to_split)))
index_split = np.array_split(np.array(index_list),split_num)
split_dict=[]
dict_to_split_items = list(dict_to_split.items())
for i in range(split_num):
dj=dict(dict_to_split_items[index_split[i][0]:index_split[i][-1]+1])
split_dict.append(dj)
return split_dict
def __inverse_name(self,name):
"""get the name of the inversed registration pair"""
name = name+'_inverse'
return name
# try:
# n_parts= name.split('_image_')
# inverse_name = n_parts[1]+'_'+n_parts[0]+'_image'
# return inverse_name
# except:
# n_parts = name.split('_brain_')
# inverse_name = n_parts[1] + '_' + n_parts[0] + '_brain'
# return inverse_name
def __len__(self):
    # Virtually inflate small training sets so that one "epoch" draws many
    # random pair samples; otherwise report the true number of pairs.
    inflate = self.phase == 'train' and len(self.name_list) < 200
    return len(self.name_list) * 500 if inflate else len(self.name_list)
def __getitem__(self, idx):
    """
    Fetch one source/target pair (and optionally their label maps).

    # todo update the load data part to mermaid fileio
    :param idx: id of the item; wrapped modulo the real pair count because
        __len__ may report an inflated length for small training sets
    :return: (sample dict, filename); sample holds 'image', 'pair_path',
        'spacing', 'original_sz', 'original_spacing' and optionally
        'label' and 'init_weight'
    """
    idx = idx % len(self.name_list)
    pair_path = self.path_list[idx]
    filename = self.name_list[idx]
    # a labeled pair has 4 paths: source img, target img, source label, target label
    has_label = len(self.path_list[idx]) == 4
    if not self.load_into_memory:
        # read from disk: strip itk meta info, resize, recompute spacing
        img_spacing_pair_list = [list(self.__read_and_clean_itk_info(pt)) for pt in pair_path]
        sitk_pair_list = [item[0] for item in img_spacing_pair_list]
        original_spacing = img_spacing_pair_list[0][1]
        original_sz = img_spacing_pair_list[0][2]
        sitk_pair_list[0], resize_factor = self.resize_img(sitk_pair_list[0])
        sitk_pair_list[1], _ = self.resize_img(sitk_pair_list[1])
        if has_label:
            # labels must be resized with label-safe interpolation
            sitk_pair_list[2], _ = self.resize_img(sitk_pair_list[2], is_label=True)
            sitk_pair_list[3], _ = self.resize_img(sitk_pair_list[3], is_label=True)
        pair_list = [sitk.GetArrayFromImage(sitk_pair) for sitk_pair in sitk_pair_list]
        pair_list = [item.astype(np.float32) for item in pair_list]
        img_after_resize = self.img_after_resize if self.img_after_resize is not None else original_sz
        # spacing scales with (size - 1) since voxel centers span the extent
        new_spacing = original_spacing * (original_sz - 1) / (np.array(img_after_resize) - 1)
        spacing = self._normalize_spacing(new_spacing, img_after_resize, silent_mode=True)
    else:
        # everything was preloaded (blosc-compressed) into memory
        zipnp_pair_list = self.pair_list[idx]
        spacing = self.spacing_list[idx]
        original_spacing = self.original_spacing_list[idx]
        original_sz = self.original_sz_list[idx]
        pair_list = [blosc.unpack_array(item) for item in zipnp_pair_list]
    sample = {'image': np.asarray([self.normalize_intensity(pair_list[0]), self.normalize_intensity(pair_list[1])])}
    sample['pair_path'] = pair_path
    if self.load_init_weight:
        sample['init_weight'] = self.init_weight_list[idx]
    if has_label:
        try:
            sample['label'] = np.asarray([pair_list[2], pair_list[3]]).astype(np.float32)
        except:
            # label shapes can mismatch for corrupted pairs; log and continue
            print(pair_list[2].shape, pair_list[3].shape)
            print(filename)
    # else:
    #     sample['label'] = None
    if self.transform:
        sample['image'] = self.transform(sample['image'])
        if has_label:
            sample['label'] = self.transform(sample['label'])
    sample['spacing'] = spacing.copy()
    sample['original_sz'] = original_sz.copy()
    sample['original_spacing'] = original_spacing.copy()
    return sample, filename
class ToTensor(object):
    """Convert a numpy ndarray sample into a torch Tensor (shares memory when possible)."""

    def __call__(self, sample):
        return torch.from_numpy(sample)
| 17,753 | 44.175573 | 186 | py |
easyreg | easyreg-master/easyreg/create_model.py | import torch
def create_model(opt):
    """
    Create a registration / segmentation model object from the task settings.

    :param opt: ParameterDict-like task setting; reads ``tsk_set/model``
        (model name) and ``tsk_set/gpu_ids`` (single gpu id, negative for cpu)
    :return: the initialized model object
    :raises ValueError: when the model name is not recognized
    """
    model_name = opt['tsk_set']['model']
    gpu_id = opt['tsk_set']['gpu_ids']
    if gpu_id >= 0:
        torch.cuda.set_device(gpu_id)
    print(model_name)
    ################ models for registration ########################
    # imports are deferred so that only the selected backend is required
    if model_name == 'reg_net':
        from .reg_net import RegNet
        model = RegNet()
    elif model_name == 'mermaid_iter':
        from .mermaid_iter import MermaidIter
        model = MermaidIter()
    elif model_name == 'nifty_reg':
        from .nifty_reg_iter import NiftyRegIter
        model = NiftyRegIter()
    elif model_name == 'ants':
        from .ants_iter import AntsRegIter
        model = AntsRegIter()
    elif model_name == 'demons':
        from .demons_iter import DemonsRegIter
        model = DemonsRegIter()
    elif model_name == 'seg_net':
        from .seg_net import SegNet
        model = SegNet()
    else:
        raise ValueError("Model [%s] not recognized." % model_name)
    model.initialize(opt)
    print("model [%s] was created" % (model.name()))
    return model
| 1,615 | 33.382979 | 104 | py |
easyreg | easyreg-master/easyreg/net_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module
import numpy as np
import torch.nn.init as init
import os
from easyreg.reproduce_paper_results import reproduce_paper_result
# Spatial dimensionality used throughout this module; the aliases below pick
# the matching 2D/3D torch building blocks so the rest of the code can stay
# dimension-agnostic.
dim = 3
Conv = nn.Conv2d if dim == 2 else nn.Conv3d
MaxPool = nn.MaxPool2d if dim == 2 else nn.MaxPool3d
ConvTranspose = nn.ConvTranspose2d if dim == 2 else nn.ConvTranspose3d
BatchNorm = nn.BatchNorm2d if dim == 2 else nn.BatchNorm3d
conv = F.conv2d if dim == 2 else F.conv3d
class conv_bn_rel(nn.Module):
    """
    conv (or transposed conv) + optional batch norm + optional activation.

    :param in_channels, out_channels, kernel_size, stride: standard conv settings
    :param active_unit: 'relu' | 'elu' | 'leaky_relu'; anything else means no activation
    :param same_padding: pad with (kernel_size - 1) // 2 so stride-1 convs keep
        the spatial size (NOTE(review): this padding ignores dilation > 1 — confirm)
    :param bn: append a batch-norm layer when True
    :param reverse: use a transposed convolution instead of a convolution
    :param group: number of blocked connections; bug fix: this used to be
        accepted but silently hard-coded to 1
    :param dilation: dilation factor; bug fix: this used to be accepted but
        silently hard-coded to 1
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False,
                 bn=False, reverse=False, group=1, dilation=1):
        super(conv_bn_rel, self).__init__()
        padding = int((kernel_size - 1) / 2) if same_padding else 0
        if not reverse:
            self.conv = Conv(in_channels, out_channels, kernel_size, stride, padding=padding,
                             groups=group, dilation=dilation)
        else:
            self.conv = ConvTranspose(in_channels, out_channels, kernel_size, stride, padding=padding,
                                      groups=group, dilation=dilation)
        self.bn = BatchNorm(out_channels) if bn else None
        if active_unit == 'relu':
            self.active_unit = nn.ReLU(inplace=True)
        elif active_unit == 'elu':
            self.active_unit = nn.ELU(inplace=True)
        elif active_unit == 'leaky_relu':
            self.active_unit = nn.LeakyReLU(inplace=True)
        else:
            self.active_unit = None

    def forward(self, x):
        """Apply conv -> (bn) -> (activation)."""
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.active_unit is not None:
            x = self.active_unit(x)
        return x
class FcRel(nn.Module):
    """Fully connected layer followed by an optional activation ('relu' / 'elu')."""

    def __init__(self, in_features, out_features, active_unit='relu'):
        super(FcRel, self).__init__()
        self.fc = nn.Linear(in_features, out_features)
        if active_unit == 'relu':
            act = nn.ReLU(inplace=True)
        elif active_unit == 'elu':
            act = nn.ELU(inplace=True)
        else:
            act = None  # any other value disables the activation
        self.active_unit = act

    def forward(self, x):
        out = self.fc(x)
        return out if self.active_unit is None else self.active_unit(out)
class Bilinear(Module):
    """
    Spatial transform function for 1D, 2D, and 3D. In BCXYZ format (this IS
    the format used in the current toolbox). Implemented via grid_sample.
    """

    def __init__(self, zero_boundary=False, using_scale=True):
        """
        Constructor
        :param zero_boundary: pad with zeros outside the image when True,
            otherwise replicate the border value
        :param using_scale: rescale [-1, 1] intensities into [0, 1] before
            sampling and back afterwards; this is due to the zero boundary
            condition we may use here
        """
        super(Bilinear, self).__init__()
        self.zero_boundary = 'zeros' if zero_boundary else 'border'
        self.using_scale = using_scale

    def forward_stn(self, input1, input2):
        # Reverse the displacement channel order before handing the grid to
        # grid_sample (channels appear to be stored as (z, y, x) while
        # grid_sample expects (x, y, z) in the last dim -- TODO confirm).
        input2_ordered = torch.zeros_like(input2)
        input2_ordered[:, 0, ...] = input2[:, 2, ...]
        input2_ordered[:, 1, ...] = input2[:, 1, ...]
        input2_ordered[:, 2, ...] = input2[:, 0, ...]
        # permute B x 3 x X x Y x Z -> B x X x Y x Z x 3 as required by grid_sample
        output = torch.nn.functional.grid_sample(input1, input2_ordered.permute([0, 2, 3, 4, 1]),
                                                 padding_mode=self.zero_boundary, align_corners=True)
        return output

    def forward(self, input1, input2):
        """
        Perform the actual spatial transform
        :param input1: image in BCXYZ format
        :param input2: spatial transform in BdimXYZ format
        :return: spatially transformed image in BCXYZ format
        """
        # allow a transform batch larger than the image batch: truncate it
        if input1.shape[0] != input2.shape[0]:
            n_batch = input1.shape[0]
            input2 = input2[:n_batch]
        if self.using_scale:
            # sample in [0, 1] intensity space, then map back to [-1, 1]
            output = self.forward_stn((input1 + 1) / 2, input2)
            return output * 2 - 1
        else:
            output = self.forward_stn(input1, input2)
            return output
def identity_map_for_reproduce(sz):
    """
    Identity map in the half-open [-1, 1) convention used by the published results.

    todo: kept only to reproduce the paper results; it will be disabled in the
    next release and replaced by the spacing-aware version.

    :param sz: just the spatial dimensions, i.e. XxYxZ
    :return: float32 tensor of shape dim x X x Y x Z
    """
    n_dim = len(sz)
    if n_dim == 1:
        grid = np.mgrid[-1:1.:2. / sz[0]]
    elif n_dim == 2:
        grid = np.mgrid[-1.:1.:2. / sz[0], -1.:1.:2. / sz[1]]
    elif n_dim == 3:
        grid = np.mgrid[-1.:1.:2. / sz[0], -1.:1.:2. / sz[1], -1.:1.:2. / sz[2]]
    else:
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    return torch.from_numpy(grid.astype(np.float32))
def identity_map(sz, dtype=np.float32):
    """
    Identity map with coordinates normalized into [-1, 1] along every axis.

    :param sz: just the spatial dimensions, i.e. XxYxZ
    :param dtype: numpy data-type of the intermediate grid
    :return: float32 tensor of shape dim x X x Y x Z
    """
    n_dim = len(sz)
    if n_dim == 1:
        grid = np.mgrid[0:sz[0]]
    elif n_dim == 2:
        grid = np.mgrid[0:sz[0], 0:sz[1]]
    elif n_dim == 3:
        grid = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
    else:
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    grid = np.array(grid.astype(dtype))
    if n_dim == 1:
        grid = grid.reshape(1, sz[0])  # add a dummy channel axis
    # normalize raw indices: index * (1 / (size - 1)) maps to [0, 1], then to [-1, 1]
    spacing = 1. / (np.array(sz) - 1)
    for d in range(n_dim):
        grid[d] = grid[d] * spacing[d] * 2 - 1
    return torch.from_numpy(grid.astype(np.float32))
def not_normalized_identity_map(sz):
    """
    Identity map holding raw voxel indices (no normalization).

    :param sz: just the spatial dimensions, i.e. XxYxZ
    :return: float32 tensor; shape X (1D), 2 x X x Y (2D) or 3 x X x Y x Z (3D)
    """
    n_dim = len(sz)
    if n_dim == 1:
        grid = np.mgrid[0:sz[0]]
    elif n_dim == 2:
        grid = np.mgrid[0:sz[0], 0:sz[1]]
    elif n_dim == 3:
        grid = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
    else:
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
    return torch.from_numpy(grid.astype(np.float32))
def gen_identity_map(img_sz, resize_factor=1., normalized=True):
    """
    Build an identity sampling grid for a (possibly resized) image.

    todo: kept for reproducibility; will be disabled in the next release and
    replaced by the spacing-aware version.

    :param img_sz: original spatial size (must cover the module-level ``dim`` axes)
    :param resize_factor: scalar, or per-axis list of resize factors
    :param normalized: coordinates in [-1, 1] when True, raw indices otherwise
    """
    if isinstance(resize_factor, list):
        scaled_sz = [int(img_sz[i] * resize_factor[i]) for i in range(dim)]
    else:
        scaled_sz = [int(img_sz[i] * resize_factor) for i in range(dim)]
    if normalized:
        # the legacy map is only used when reproducing the published numbers
        return identity_map(scaled_sz) if not reproduce_paper_result else identity_map_for_reproduce(scaled_sz)
    return not_normalized_identity_map(scaled_sz)
class AffineConstrain(object):
    """
    Regularization on (3D) affine parameters.

    sched:
        'l2': squared distance to the identity transform (elementwise)
        'det': mean determinant of the affine matrices over the batch
    """

    def __init__(self):
        if dim == 3:
            # identity affine as a flat 12-vector: positions 0/4/8 form the
            # 3x3 identity; the remaining entries (presumably the translation)
            # stay zero
            self.affine_identity = torch.zeros(12).cuda()
            self.affine_identity[0] = 1.
            self.affine_identity[4] = 1.
            self.affine_identity[8] = 1.
        else:
            raise ValueError("Not Implemented")

    def __call__(self, affine_param, sched='l2'):
        """
        :param affine_param: batch of flat affine parameters (B x 12)
        :param sched: 'l2' or 'det', see class docstring
        """
        if sched == 'l2':
            return (self.affine_identity - affine_param) ** 2
        elif sched == 'det':
            mean_det = 0.
            for i in range(affine_param.shape[0]):
                # first 9 entries are the 3x3 linear part
                affine_matrix = affine_param[i, :9].contiguous().view(3, 3)
                mean_det += torch.det(affine_matrix)
            return mean_det / affine_param.shape[0]
def clip_gradient(model, clip_norm):
    """Rescale all parameter gradients so their joint L2 norm is at most ``clip_norm``."""
    grad_params = [p for p in model.parameters() if p.requires_grad]
    total_sq = 0
    for p in grad_params:
        total_sq += p.grad.data.norm() ** 2
    total_norm = np.sqrt(total_sq)
    # scale is 1 when the norm is already within the budget
    scale = clip_norm / max(total_norm, clip_norm)
    for p in grad_params:
        p.grad.mul_(scale)
def space_normal(tensors, std=0.1):
    """
    space normalize for the net kernel: fill each kernel with a normalized
    Gaussian centered in the kernel. Currently disabled — see the raise below.

    :param tensors: weight tensor (N x C x ...) to fill in place
    :param std: std of the Gaussian along every axis
    :return: the tensor (when called with a torch.Tensor)
    """
    if isinstance(tensors, torch.Tensor):
        # NOTE(review): with modern PyTorch, tensors.data is itself a Tensor,
        # so this branch recurses forever; it predates the Variable/Tensor
        # merge — confirm before re-enabling this function.
        space_normal(tensors.data, std=std)
        return tensors
    for n in range(tensors.size()[0]):
        for c in range(tensors.size()[1]):
            dim = tensors[n][c].dim()
            sz = tensors[n][c].size()
            mus = np.zeros(dim)
            stds = std * np.ones(dim)
            print('WARNING: What should the spacing be here? Needed for new identity map code')
            # deliberately disabled until the correct spacing is determined;
            # centered_identity_map / compute_normalized_gaussian are expected
            # to come from the mermaid utilities (not imported here)
            raise ValueError('Double check the spacing here before running this code')
            spacing = np.ones(dim)
            centered_id = centered_identity_map(sz, spacing)
            g = compute_normalized_gaussian(centered_id, mus, stds)
            tensors[n, c] = torch.from_numpy(g)
def weights_init_uniform(m):
    """Uniform init: conv weights in [0.038, 0.042], linear weights in [0, 0.02].

    BatchNorm layers get the standard N(1.0, 0.02) weight / zero bias init.
    Uses the in-place ``init.*_`` API; the legacy un-suffixed aliases
    (``init.uniform`` etc.) were removed from recent PyTorch releases.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.uniform_(m.weight.data, 0.038, 0.042)
    elif classname.find('Linear') != -1:
        init.uniform_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        # bug fix: the original called init.uniform(w, 1.0, 0.02) with
        # from > to, which modern PyTorch rejects; use the conventional
        # N(1.0, 0.02) batch-norm init instead.
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_normal(m):
    """Spatial-Gaussian init for conv/linear weights; standard BatchNorm init.

    Conv and Linear weights are filled by ``space_normal`` (a centered Gaussian
    over the kernel); BatchNorm weights get N(1.0, 0.02) and zero bias.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        space_normal(m.weight.data)
    elif classname.find('Linear') != -1:
        space_normal(m.weight.data)
    elif classname.find('BatchNorm2d') != -1:
        # bug fix: init.uniform(w, 1.0, 0.02) used an inverted range and a
        # removed API; use the conventional N(1.0, 0.02) batch-norm init.
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_rd_normal(m):
    """Random standard-normal init for conv/linear weights; standard BatchNorm init.

    Migrated from the removed ``init.normal``/``init.uniform``/``init.constant``
    aliases to the in-place ``init.*_`` API.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.normal_(m.weight.data)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data)
    elif classname.find('BatchNorm2d') != -1:
        # bug fix: inverted uniform range replaced by the conventional
        # N(1.0, 0.02) batch-norm init.
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
    """Xavier-normal init (gain=1) for conv/linear weights; standard BatchNorm init.

    Migrated from the removed ``init.xavier_normal``/``init.uniform``/
    ``init.constant`` aliases to the in-place ``init.*_`` API.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        # bug fix: inverted uniform range replaced by the conventional
        # N(1.0, 0.02) batch-norm init.
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
    """Kaiming-normal (fan-in) init for conv/linear weights; standard BatchNorm init.

    Migrated from the removed ``init.kaiming_normal``/``init.uniform``/
    ``init.constant`` aliases to the in-place ``init.*_`` API.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        # bug fix: inverted uniform range replaced by the conventional
        # N(1.0, 0.02) batch-norm init.
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
    """Orthogonal init (gain=1) for conv/linear weights; standard BatchNorm init.

    Migrated from the removed ``init.orthogonal``/``init.uniform``/
    ``init.constant`` aliases to the in-place ``init.*_`` API; the stray
    per-module debug ``print(classname)`` was removed.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        # bug fix: inverted uniform range replaced by the conventional
        # N(1.0, 0.02) batch-norm init.
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
    """Apply the chosen weight-initialization scheme to every submodule of ``net``.

    :raises NotImplementedError: for an unknown ``init_type``
    """
    print('initialization method [%s]' % init_type)
    if init_type == 'rd_normal':
        init_fn = weights_init_rd_normal
    elif init_type == 'normal':
        init_fn = weights_init_normal
    elif init_type == 'uniform':
        init_fn = weights_init_uniform
    elif init_type == 'xavier':
        init_fn = weights_init_xavier
    elif init_type == 'kaiming':
        init_fn = weights_init_kaiming
    elif init_type == 'orthogonal':
        init_fn = weights_init_orthogonal
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
    net.apply(init_fn)
def print_network(net):
    """Print the network structure followed by its total parameter count."""
    total_params = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total_params)
def resume_train(model_path, model, optimizer):
    """
    resume the training from checkpoint
    :param model_path: the checkpoint path
    :param model: the model to be set
    :param optimizer: the optimizer to be set (a tuple of optimizers is skipped)
    :return: (start_epoch, best_prec1, global_step) when the checkpoint exists;
        otherwise None (only a warning is printed)
    """
    if os.path.isfile(model_path):
        print("=> loading checkpoint '{}'".format(model_path))
        # load on cpu first; tensors are moved to the gpu further below
        checkpoint = torch.load(model_path, map_location='cpu')  # {'cuda:'+str(old_gpu):'cuda:'+str(cur_gpu)})
        start_epoch = 0
        best_prec1 = 0.0
        load_only_one = False  # NOTE(review): unused, kept for compatibility
        if 'epoch' in checkpoint:
            start_epoch = checkpoint['epoch'] + 1
            print("the started epoch now is {}".format(start_epoch))
        else:
            start_epoch = 0
        if 'best_loss' in checkpoint:
            best_prec1 = checkpoint['best_loss']
        else:
            best_prec1 = 0.
        if 'global_step' in checkpoint:
            global_step = checkpoint['global_step']
        else:
            # fall back to fresh per-phase step counters
            phases = ['train', 'val', 'debug']
            global_step = {x: 0 for x in phases}
        try:
            model.load_state_dict(checkpoint['state_dict'])
            print("=> succeed load model '{}'".format(model_path))
        except:
            ############### TODO Currently not compatabile to enemble network ###############
            # best effort: load whatever parameter names match
            print("Warning !!! Meet error is reading the whole model, now try to read the part")
            model.load_state_dict(checkpoint['state_dict'], strict=False)
            print(" The incomplelet model is succeed load from '{}'".format(model_path))
        if 'optimizer' in checkpoint:
            if not isinstance(optimizer, tuple):
                try:
                    optimizer.load_state_dict(checkpoint['optimizer'])
                    # move the restored optimizer state onto the gpu
                    for state in optimizer.state.values():
                        for k, v in state.items():
                            if isinstance(v, torch.Tensor):
                                state[k] = v.cuda()
                    print("=> succeed load optimizer '{}'".format(model_path))
                    optimizer.zero_grad()
                except:
                    print("Warning !!! Meet error during loading the optimize, not externaly initialized")
        return start_epoch, best_prec1, global_step
    else:
        print("=> no checkpoint found at '{}'".format(model_path))


# loading a model for testing reuses the same checkpoint-restore logic
get_test_model = resume_train
def save_checkpoint(state, is_best, path, prefix, filename='checkpoint.pth.tar'):
    """
    save checkpoint during training

    :param state: dict to serialize, e.g. {'epoch': epoch, 'state_dict':
        model.network.state_dict(), 'optimizer': optimizer_state,
        'best_score': best_score, 'global_step': global_step}
    :param is_best: if it is the best model so far, additionally save a copy
        as ``model_best.pth.tar``
    :param path: directory to save the checkpoint into (created, including
        parents, if missing)
    :param prefix: prefix to add before the fname
    :param filename: filename
    :return: None
    """
    # bug fix: os.mkdir failed when intermediate directories were missing
    os.makedirs(path, exist_ok=True)
    prefix_save = os.path.join(path, prefix)
    name = '_'.join([prefix_save, filename])
    try:
        # keep checkpoints loadable by older PyTorch versions
        torch.save(state, name, _use_new_zipfile_serialization=False)
    except:
        # older torch versions do not know this keyword
        torch.save(state, name)
    if is_best:
        torch.save(state, os.path.join(path, 'model_best.pth.tar'))
def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):
    """
    Utility function for computing output of convolutions.

    Every argument may be a scalar or an (h, w) tuple (dilation stays scalar);
    returns the output (h, w) following standard convolution arithmetic.
    """
    def as_pair(v):
        return v if type(v) is tuple else (v, v)

    h_w, kernel_size, stride, pad = (as_pair(v) for v in (h_w, kernel_size, stride, pad))
    h, w = (
        (h_w[i] + (2 * pad[i]) - (dilation * (kernel_size[i] - 1)) - 1) // stride[i] + 1
        for i in range(2)
    )
    return h, w
def convtransp_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):
    """
    Utility function for computing output of transposed convolutions.

    Every argument may be a scalar or an (h, w) tuple (dilation stays scalar);
    returns the output (h, w) following torch.nn.ConvTranspose2d arithmetic
    (with output_padding assumed 0):

        out = (in - 1) * stride - 2 * pad + dilation * (kernel - 1) + 1

    Bug fix: the previous formula added ``pad`` back (``- 2*pad + k + pad``)
    and ignored ``dilation``, overestimating the output size by ``pad`` pixels.
    """
    if type(h_w) is not tuple:
        h_w = (h_w, h_w)
    if type(kernel_size) is not tuple:
        kernel_size = (kernel_size, kernel_size)
    if type(stride) is not tuple:
        stride = (stride, stride)
    if type(pad) is not tuple:
        pad = (pad, pad)
    h = (h_w[0] - 1) * stride[0] - 2 * pad[0] + dilation * (kernel_size[0] - 1) + 1
    w = (h_w[1] - 1) * stride[1] - 2 * pad[1] + dilation * (kernel_size[1] - 1) + 1
    return h, w
easyreg | easyreg-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# pytorchRegistration documentation build configuration file, created by
# sphinx-quickstart on Sat Jul 29 08:41:36 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import glob
import shutil
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import StringList
from docutils import nodes
import re
import sphinx_gallery
# Python 2 compatibility: FileNotFoundError does not exist there; fall back
# to its Python 2 equivalent, IOError.
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError
class IncludeDirective(Directive):
    """Include source file without docstring at the top of file.

    Implementation just replaces the first docstring found in file
    with '' once.

    Example usage:

    .. includenodoc:: /beginner/examples_tensor/two_layer_net_tensor.py
    """

    # defines the parameter the directive expects
    # directives.unchanged means you get the raw value from RST
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = False
    add_index = False

    # matches the first triple-double-quoted docstring (non-greedy)
    docstring_pattern = r'"""(?P<docstring>(?:.|[\r\n])*?)"""\n'
    docstring_regex = re.compile(docstring_pattern)

    def run(self):
        document = self.state.document
        env = document.settings.env
        rel_filename, filename = env.relfn2path(self.arguments[0])

        try:
            text = open(filename).read()
            # strip only the first (module-level) docstring
            text_no_docstring = self.docstring_regex.sub('', text, count=1)

            code_block = nodes.literal_block(text=text_no_docstring)
            return [code_block]
        except FileNotFoundError as e:
            # a missing source file degrades to an empty node list
            print(e)
            return []
class GalleryItemDirective(Directive):
    """
    Create a sphinx gallery thumbnail for insertion anywhere in docs.

    Optionally, you can specify the custom figure and intro/tooltip for the
    thumbnail.

    Example usage:

    .. galleryitem:: intermediate/char_rnn_generation_tutorial.py
        :figure: _static/img/char_rnn_generation.png
        :intro: Put your custom intro here.

    If figure is specified, a thumbnail will be made out of it and stored in
    _static/thumbs. Therefore, consider _static/thumbs as a 'built' directory.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'figure': directives.unchanged,
                   'intro': directives.unchanged}
    has_content = False
    add_index = False

    def run(self):
        args = self.arguments
        fname = args[-1]

        env = self.state.document.settings.env
        fname, abs_fname = env.relfn2path(fname)
        basename = os.path.basename(fname)
        dirname = os.path.dirname(fname)

        try:
            # tooltip: explicit :intro: option, or extracted from the example file
            if 'intro' in self.options:
                intro = self.options['intro'][:195] + '...'
            else:
                _, blocks = sphinx_gallery.gen_rst.split_code_and_text_blocks(abs_fname)
                intro, _ = sphinx_gallery.gen_rst.extract_intro_and_title(abs_fname, blocks[0][1])

            thumbnail_rst = sphinx_gallery.backreferences._thumbnail_div(
                dirname, basename, intro)

            if 'figure' in self.options:
                # build a scaled thumbnail from the custom figure
                rel_figname, figname = env.relfn2path(self.options['figure'])
                save_figname = os.path.join('_static/thumbs/',
                                            os.path.basename(figname))

                try:
                    os.makedirs('_static/thumbs')
                except OSError:
                    # directory already exists
                    pass

                sphinx_gallery.gen_rst.scale_image(figname, save_figname,
                                                   400, 280)
                # replace figure in rst with simple regex
                thumbnail_rst = re.sub(r'..\sfigure::\s.*\.png',
                                       '.. figure:: /{}'.format(save_figname),
                                       thumbnail_rst)

            thumbnail = StringList(thumbnail_rst.split('\n'))
            thumb = nodes.paragraph()
            self.state.nested_parse(thumbnail, self.content_offset, thumb)
            return [thumb]
        except FileNotFoundError as e:
            # a missing example/figure degrades to an empty node list
            print(e)
            return []
GALLERY_TEMPLATE = """
.. raw:: html
<div class="sphx-glr-thumbcontainer" tooltip="{tooltip}">
.. only:: html
.. figure:: {thumbnail}
{description}
.. raw:: html
</div>
"""
class CustomGalleryItemDirective(Directive):
    """Create a sphinx gallery style thumbnail.

    tooltip and figure are self explanatory. Description could be a link to
    a document like in below example.

    Example usage:

    .. customgalleryitem::
        :tooltip: I am writing this tutorial to focus specifically on NLP for people who have never written code in any deep learning framework
        :figure: /_static/img/thumbnails/babel.jpg
        :description: :doc:`/beginner/deep_learning_nlp_tutorial`

    If figure is specified, a thumbnail will be made out of it and stored in
    _static/thumbs. Therefore, consider _static/thumbs as a 'built' directory.
    """

    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'tooltip': directives.unchanged,
                   'figure': directives.unchanged,
                   'description': directives.unchanged}
    has_content = False
    add_index = False

    def run(self):
        try:
            # tooltip is mandatory
            if 'tooltip' in self.options:
                tooltip = self.options['tooltip'][:195] + '...'
            else:
                raise ValueError('tooltip not found')

            # figure is optional; fall back to the default thumbnail
            if 'figure' in self.options:
                env = self.state.document.settings.env
                rel_figname, figname = env.relfn2path(self.options['figure'])
                thumbnail = os.path.join('_static/thumbs/', os.path.basename(figname))

                try:
                    os.makedirs('_static/thumbs')
                except FileExistsError:
                    pass

                sphinx_gallery.gen_rst.scale_image(figname, thumbnail, 400, 280)
            else:
                thumbnail = '_static/img/thumbnails/default.png'

            # description is mandatory
            if 'description' in self.options:
                description = self.options['description']
            else:
                raise ValueError('description not doc found')

        except FileNotFoundError as e:
            print(e)
            return []
        except ValueError as e:
            print(e)
            raise
            # NOTE(review): unreachable — the raise above always fires first
            return []

        thumbnail_rst = GALLERY_TEMPLATE.format(tooltip=tooltip,
                                                thumbnail=thumbnail,
                                                description=description)
        thumbnail = StringList(thumbnail_rst.split('\n'))
        thumb = nodes.paragraph()
        self.state.nested_parse(thumbnail, self.content_offset, thumb)
        return [thumb]
#from .custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.insert(0,os.path.abspath('../../easyreg'))
#sys.path.insert(0,os.path.abspath('../../easyreg/libraries'))
#sys.path.insert(0,os.path.abspath('../..'))
sys.path.insert(0,os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.autosectionlabel',
'sphinx.ext.napoleon',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'nbsphinx',
#'sphinx_gallery.gen_gallery'
]
# generate autosummary even if no references
autosummary_generate = True
#
# sphinx_gallery_conf = {
# 'examples_dirs': ['../../demos', '../../jupyter'], # path to your example scripts
# 'filename_pattern': '/example_',
# # directory where function granular galleries are stored
# 'backreferences_dir': False,
# 'ignore_pattern': r'__init__\.py',
# 'gallery_dirs': ['auto_demos', 'auto_jupyter'] # path where to save gallery generated examples
# }
#
# for i in range(len(sphinx_gallery_conf['examples_dirs'])):
# gallery_dir = sphinx_gallery_conf['gallery_dirs'][i]
# source_dir = sphinx_gallery_conf['examples_dirs'][i]
# # Create gallery dirs if it doesn't exist
# try:
# os.mkdir(gallery_dir)
# except OSError:
# pass
#
# # Copy rst files from source dir to gallery dir
# for f in glob.glob(os.path.join(source_dir, '*.rst')):
# shutil.copy(f, gallery_dir)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'easyreg'
copyright = u'2018, Zhengyang Shen'
author = u'Zhengyang Shen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autodoc_member_order = 'bysource'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
#html_theme = 'classic'
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes',]
#html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'logo_only' : True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'easyregdoc'
html_logo = 'easyreg-logo.png'
# -- Options for LaTeX output ---------------------------------------------
latex_engine = 'xelatex'
latex_show_urls = 'footnote'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'easyreg.tex', u'easyreg Documentation',
u'Marc Niethammer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'easyreg', u'easyreg Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'easyreg', u'easyreg Documentation',
author, 'easyreg', 'Image registration via automatic differentiation', 'Miscellaneous'),
]
autosectionlabel_prefix_document = True
def setup(app):
    """Sphinx extension hook: register the custom directives defined above."""
    # Custom CSS
    # app.add_stylesheet('css/pytorch_theme.css')
    # app.add_stylesheet('https://fonts.googleapis.com/css?family=Lato')
    # Custom directives
    app.add_directive('includenodoc', IncludeDirective)
    # app.add_directive('galleryitem', GalleryItemDirective)
    app.add_directive('customgalleryitem', CustomGalleryItemDirective)
point_clouds_registration_benchmark | point_clouds_registration_benchmark-master/devel/generate_pcd_kaist.py | import argparse
import os
import sys
sys.path.append("..")
sys.path.append(".")
import h5py
import numpy as np
import torch
import open3d as o3
from tqdm import tqdm
def load_cloud(stamp, folder, pose, calib_pose):
    """Load one Velodyne scan and return it as a point cloud in world coordinates.

    Args:
        stamp: integer timestamp; the scan is read from ``<folder>/<stamp>.bin``.
        folder: directory holding the raw ``.bin`` scans (float32 x, y, z, intensity).
        pose: 4x4 vehicle-to-world transform (numpy array).
        calib_pose: 4x4 sensor-to-vehicle calibration transform (numpy array).

    Returns:
        An open3d point cloud with world-frame points; the per-point intensity
        (normalized by 238 -- presumably the sensor's max value, TODO confirm)
        is replicated into the RGB channels.
    """
    pc_file = os.path.join(folder, f'{stamp}.bin')
    pc = np.fromfile(pc_file, dtype=np.float32)
    pc = pc.reshape(-1, 4)
    intensity = pc[:, 3].copy()
    intensity /= 238.
    # Replace intensity with 1 so each row is a homogeneous coordinate.
    pc[:, 3] = 1.
    pc_rot = np.matmul(calib_pose, pc.T)
    pc_rot = np.matmul(pose, pc_rot)
    # BUGFIX: np.float (deprecated in NumPy 1.20, removed in 1.24) raised
    # AttributeError; np.float64 is the dtype that alias pointed to.
    pc_rot = pc_rot.astype(np.float64).T.copy()
    # NOTE(review): o3.PointCloud / o3.Vector3dVector is the legacy (pre-0.8)
    # open3d API -- newer releases use o3.geometry.PointCloud etc.
    pcl_local = o3.PointCloud()
    pcl_local.points = o3.Vector3dVector(pc_rot[:, :3])
    pcl_local.colors = o3.Vector3dVector(np.vstack((intensity, intensity, intensity)).T)
    return pcl_local
parser.add_argument('--sequence', default='00',
help='sequence')
parser.add_argument('--device', default='cuda',
help='device')
args = parser.parse_args()
sequence = args.sequence
print("Sequence: ", sequence)
base_folder = '/home/simone/Downloads/kaist'
base_folder = os.path.join(base_folder, sequence)
velodyne_folder = os.path.join(base_folder, 'sensor_data')
pose_file = os.path.join(base_folder, 'global_pose.csv')
#[13:39:33] [LAS] Cloud has been recentered! Translation: (-332466.25 ; -4140619.12 ; -19.99)
poses = {}
with open(pose_file, 'r') as f:
for x in f:
x = x.split(',')
pose_stamp = int(x[0])
RT = torch.zeros([4, 4])
for i in range(1, 13):
x[i] = float(x[i])
RT[0, 0] = x[1]
RT[0, 1] = x[2]
RT[0, 2] = x[3]
RT[0, 3] = x[4] - 332466.25
RT[1, 0] = x[5]
RT[1, 1] = x[6]
RT[1, 2] = x[7]
RT[1, 3] = x[8] - 4140619.12
RT[2, 0] = x[9]
RT[2, 1] = x[10]
RT[2, 2] = x[11]
RT[2, 3] = x[12] - 19.99
RT[3, 3] = 1.0
poses[pose_stamp] = RT.clone()
first_frame = 0
vlp_left_folder = os.path.join(velodyne_folder, 'VLP_left')
vlp_left_stamps_file = os.path.join(velodyne_folder, 'VLP_left_stamp.csv')
vlp_left_stamps = []
with open(vlp_left_stamps_file, 'r') as f:
for x in f:
vlp_left_stamps.append(int(x))
last_frame = len(vlp_left_stamps)
vlp_left_calib = os.path.join(base_folder, 'calibration', 'Vehicle2LeftVLP.txt')
v2vlpleft = torch.zeros((4, 4))
v2vlpleft[3, 3] = 1.
with open(vlp_left_calib, 'r') as f:
for x in f:
if x.startswith('R: '):
x = x[3:]
x = x.split(' ')
for i in range(9):
x[i] = float(x[i])
v2vlpleft[0, 0] = x[0]
v2vlpleft[0, 1] = x[1]
v2vlpleft[0, 2] = x[2]
v2vlpleft[1, 0] = x[3]
v2vlpleft[1, 1] = x[4]
v2vlpleft[1, 2] = x[5]
v2vlpleft[2, 0] = x[6]
v2vlpleft[2, 1] = x[7]
v2vlpleft[2, 2] = x[8]
elif x.startswith('T: '):
x = x[3:]
x = x.split(' ')
for i in range(3):
x[i] = float(x[i])
v2vlpleft[0, 3] = x[0]
v2vlpleft[1, 3] = x[1]
v2vlpleft[2, 3] = x[2]
v2vlpleft = v2vlpleft.numpy()
vlp_right_folder = os.path.join(velodyne_folder, 'VLP_right')
vlp_right_stamps_file = os.path.join(velodyne_folder, 'VLP_right_stamp.csv')
vlp_right_stamps = []
with open(vlp_right_stamps_file, 'r') as f:
for x in f:
vlp_right_stamps.append(int(x))
last_frame = min(len(vlp_right_stamps), len(vlp_left_stamps))
vlp_right_calib = os.path.join(base_folder, 'calibration', 'Vehicle2RightVLP.txt')
v2vlpright = torch.zeros((4, 4))
v2vlpright[3, 3] = 1.
with open(vlp_right_calib, 'r') as f:
for x in f:
if x.startswith('R: '):
x = x[3:]
x = x.split(' ')
for i in range(9):
x[i] = float(x[i])
v2vlpright[0, 0] = x[0]
v2vlpright[0, 1] = x[1]
v2vlpright[0, 2] = x[2]
v2vlpright[1, 0] = x[3]
v2vlpright[1, 1] = x[4]
v2vlpright[1, 2] = x[5]
v2vlpright[2, 0] = x[6]
v2vlpright[2, 1] = x[7]
v2vlpright[2, 2] = x[8]
elif x.startswith('T: '):
x = x[3:]
x = x.split(' ')
for i in range(3):
x[i] = float(x[i])
v2vlpright[0, 3] = x[0]
v2vlpright[1, 3] = x[1]
v2vlpright[2, 3] = x[2]
v2vlpright = v2vlpright.numpy()
first_pose_stamp = min(poses)
for left_stamp in tqdm(vlp_left_stamps):
if left_stamp >= first_pose_stamp:
left_pose = poses[min(poses, key=lambda x: abs(x-left_stamp))]
left_pose = left_pose.numpy()
left_cloud = load_cloud(left_stamp, vlp_left_folder,left_pose, v2vlpleft)
right_stamp = min(vlp_right_stamps, key = lambda x: abs(x-left_stamp))
right_pose = poses[min(poses, key=lambda x: abs(x-right_stamp))]
right_pose = right_pose.numpy()
right_cloud = load_cloud(right_stamp, vlp_right_folder, right_pose, v2vlpright)
left_cloud.points.extend(right_cloud.points)
left_cloud.colors.extend(right_cloud.colors)
o3.write_point_cloud(f'{base_folder}/{sequence}_{left_stamp}.pcd', left_cloud)
| 5,242 | 29.841176 | 93 | py |
SPARQA | SPARQA-master/code/common/bert_args.py |
class BertArgs():
    """File-system locations of the pre-trained BERT models and, per dataset,
    of the fine-tuned task models used by the SPARQA pipeline."""
    def __init__(self, root, mode):
        """root: project root directory (no trailing slash).
        mode: 'cwq' or 'graphq' selects the fine-tuned model set; any other
        value leaves the fine-tuned attributes unset."""
        pretrained = f'{root}/pre_train_models'
        # uncased model
        self.bert_base_uncased_model = f'{pretrained}/bert-base-uncased.tar.gz'
        self.bert_base_uncased_tokenization = f'{pretrained}/bert-base-uncased-vocab.txt'
        # cased model
        self.bert_base_cased_model = f'{pretrained}/bert-base-cased.tar.gz'
        self.bert_base_cased_tokenization = f'{pretrained}/bert-base-cased-vocab.txt'
        if mode == 'cwq':
            self.get_cwq_args(root=root)
        elif mode == 'graphq':
            self.get_graphq_args(root=root)

    def get_cwq_args(self, root):
        """Set fine-tuned model paths for the ComplexWebQuestions dataset."""
        base = f'{root}/dataset_cwq_1_1/fine_tuning_models_cwq_0831/'
        self.fine_tuning_headword_squad_F_model = f'{base}debug_cwq_headwords_0831_squad_F/pytorch_model.bin'
        self.fine_tuning_token_classifier_C_model = f'{base}debug_cwq_node_classifier_0831_C/pytorch_model.bin'
        self.fine_tuning_redundancy_span_D_model = f'{base}debug_cwq_redundancy_0831_D/pytorch_model.bin'
        self.fine_tuning_relation_classifier_E_model = f'{base}debug_cwq_relation_classifier_0831_E/pytorch_model.bin'
        self.fine_tuning_sequence_classifier_B_model = f'{base}debug_cwq_simplification_0831_B/pytorch_model.bin'

    def get_graphq_args(self, root):
        """Set fine-tuned model paths for the GraphQuestions dataset."""
        base = f'{root}/dataset_graphquestions/fine_tuning_models_graphq_0905/'
        self.fine_tuning_headword_squad_F_model = f'{base}debug_headwords_0905_squad_F/pytorch_model.bin'
        self.fine_tuning_token_classifier_C_model = f'{base}debug_node_3_0905_C/pytorch_model.bin'
        self.fine_tuning_redundancy_span_D_model = f'{base}debug_redundancy_0905_D/pytorch_model.bin'
        self.fine_tuning_relation_classifier_E_model = f'{base}debug_relation_classifier_0905_E/pytorch_model.bin'
        self.fine_tuning_sequence_classifier_B_model = f'{base}debug_simplification_0905_B/pytorch_model.bin'
| 1,967 | 58.636364 | 118 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert_interface/token_classifier_interface.py | from parsing.parsing_args import bert_args
from parsing.models.fine_tuning_based_on_bert.run_token_classifier import NodeRecogniationProcessor, convert_example_to_features_for_test
from parsing.models import model_utils
import sys
import os
import random
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from parsing.models.pytorch_pretrained_bert.tokenization import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForTokenClassification
#------------------------------------------------
# Make the package root importable when this module is run directly.
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
# print (sys.path)
# Pin inference to GPU #2; must be set before torch selects a device.
os.environ['CUDA_VISIBLE_DEVICES']='2'
#------------------------------------------------
# Module-level one-time setup: load the fine-tuned node-recognition (token
# classification) model so process() can be called repeatedly at low cost.
task_name = 'node_recognition'
args = model_utils.run_token_classifier_get_local_args()
processors = {
    "node_recognition": NodeRecogniationProcessor,
}
# Number of output classes per task (kept in sync with the label list).
num_labels_task = {
    "node_recognition": 7 ,#len(processor.get_labels()),
}
processor = processors[task_name]()
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
# Seed every RNG for reproducible behavior.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
# Bidirectional label <-> id lookup tables.
label_ids_map = {label: i for i, label in enumerate(label_list)}
ids_label_map = {i: label for i, label in enumerate(label_list)}
tokenizer = BertTokenizer.from_pretrained(bert_args.bert_base_cased_tokenization, do_lower_case=args.do_lower_case)
# Load the fine-tuned weights on CPU first, then move the model to `device`.
model_state_dict = torch.load(bert_args.fine_tuning_token_classifier_C_model, map_location='cpu')
model = BertForTokenClassification.from_pretrained(bert_args.bert_base_cased_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
def process(sequence):
    """Run the fine-tuned token classifier on one sentence and return the
    post-processed node-recognition label sequence."""
    example = processor.get_sequence_example(sequence)
    features, new_labels_temp = convert_example_to_features_for_test(example, args.max_seq_length, tokenizer)
    # Tensorize the features and iterate them in their original order.
    dataset = TensorDataset(
        torch.tensor([f.input_ids for f in features], dtype=torch.long),
        torch.tensor([f.input_mask for f in features], dtype=torch.long),
        torch.tensor([f.segment_ids for f in features], dtype=torch.long))
    loader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=args.eval_batch_size)
    model.eval()
    result_sequence = None
    for input_ids, input_mask, segment_ids in loader:
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
        # logits: [batch_size, sequence_length, num_labels] -> per-token argmax.
        predictions = np.argmax(logits.detach().cpu().numpy(), axis=2)
        for row in predictions:
            # NOTE(review): result_sequence is overwritten per row, so only the
            # last row survives -- fine for the single-example input used here.
            result_sequence = model_utils.ner_prediction_sequence(ids_label_map=ids_label_map, outputs=row)
    return model_utils.ner_postprocess(result_sequence, new_labels_temp)
if __name__ == "__main__":
    # Smoke test: tag one example question.
    print(process('Where was the main artist featured in the Rihanna : Live in Concert Tour raised ?'))
####################################
# "headword": 5 # task_name = 'headword' # "headword": HeadwordProcessor
| 3,666 | 46.623377 | 137 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert_interface/simplif_classifier_interface.py | import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from parsing.models import model_utils
from parsing.models.fine_tuning_based_on_bert.run_sequence_classifier import \
SimplificationQuestionProcessor, convert_examples_to_features
from parsing.models.pytorch_pretrained_bert import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForSequenceClassification
from parsing.parsing_args import bert_args
# Module-level one-time setup: load the fine-tuned question-simplification
# (binary sequence classification) model used by process().
num_labels_task = {"simplification":2}
processors = {"simplification": SimplificationQuestionProcessor}
task_name = "simplification"
args = model_utils.run_sequence_classifier_get_local_args()
processor = processors[task_name]()
label_list = processor.get_labels()
num_labels = num_labels_task[task_name]
tokenizer = BertTokenizer.from_pretrained(bert_args.bert_base_cased_tokenization, do_lower_case=args.do_lower_case)
# label_ids_map = {label: i for i, label in enumerate(label_list)}
# Map predicted class index back to its label string.
ids_label_map = {i: label for i, label in enumerate(label_list)}
bert_fine_tuning_filepath = bert_args.fine_tuning_sequence_classifier_B_model
# Load the fine-tuned weights on CPU first, then move the model to `device`.
model_state_dict = torch.load(bert_fine_tuning_filepath, map_location='cpu')
model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
model.to(device)
def process(line_a, line_b=None):
    """Classify a question (optionally paired with a second sequence) with the
    fine-tuned simplification model; return the predicted label as an int."""
    examples = processor.get_simple_examples(line_a=line_a, line_b=line_b)
    features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer)
    dataset = TensorDataset(
        torch.tensor([f.input_ids for f in features], dtype=torch.long),
        torch.tensor([f.input_mask for f in features], dtype=torch.long),
        torch.tensor([f.segment_ids for f in features], dtype=torch.long),
        torch.tensor([f.label_id for f in features], dtype=torch.long))
    loader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=args.eval_batch_size)
    model.eval()
    outputs = []
    for input_ids, input_mask, segment_ids, label_ids in loader:
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids, segment_ids, input_mask)
        # Argmax over classes; inputs here fit a single batch.
        outputs = np.argmax(logits.detach().cpu().numpy(), axis=1)
    return int(ids_label_map[outputs[0]])
if __name__ == "__main__":
    # Smoke test: classify one example question.
    classifier_label = process(line_a='What \'s the currency called')
    print(classifier_label)
| 2,937 | 51.464286 | 122 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert_interface/paraphrase_classifier_interface.py | import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from parsing.models import model_utils
from parsing.models.fine_tuning_based_on_bert.run_sequence_classifier import ParaphraseProcess, convert_examples_to_features
from parsing.models.pytorch_pretrained_bert import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForSequenceClassification
from parsing.parsing_args import bert_args
# Module-level one-time setup: load the fine-tuned paraphrase (binary
# sequence classification) model used by process().
num_labels_task = {"paraphrase":2}
processors = {"paraphrase": ParaphraseProcess}
task_name = "paraphrase"
args = model_utils.run_sequence_classifier_get_local_args()
processor = processors[task_name]()
label_list = processor.get_labels()
num_labels = num_labels_task[task_name]
tokenizer = BertTokenizer.from_pretrained(bert_args.bert_base_cased_tokenization, do_lower_case=args.do_lower_case)
# label_ids_map = {label: i for i, label in enumerate(label_list)}
# Map predicted class index back to its label string.
ids_label_map = {i: label for i, label in enumerate(label_list)}
# NOTE(review): bert_args must define fine_tuning_paraphrase_classifier_G_model;
# the BertArgs class visible elsewhere in this repo does not set it -- verify
# against the active configuration.
bert_fine_tuning_filepath = bert_args.fine_tuning_paraphrase_classifier_G_model
model_state_dict = torch.load(bert_fine_tuning_filepath, map_location='cpu')
model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
model.to(device)
def process(line_a, line_b=None):
    """Score a candidate paraphrase pair with the fine-tuned model.

    Returns (logits, predicted_class_index); logits is the numpy array from
    the last batch, so logits[0][1] is the paraphrase score of the first example."""
    examples = processor.get_simple_examples(line_a=line_a, line_b=line_b)
    features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer)
    dataset = TensorDataset(
        torch.tensor([f.input_ids for f in features], dtype=torch.long),
        torch.tensor([f.input_mask for f in features], dtype=torch.long),
        torch.tensor([f.segment_ids for f in features], dtype=torch.long),
        torch.tensor([f.label_id for f in features], dtype=torch.long))
    loader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=args.eval_batch_size)
    model.eval()
    outputs = []
    logits = None
    for input_ids, input_mask, segment_ids, label_ids in loader:
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids, segment_ids, input_mask)
        logits = logits.detach().cpu().numpy()
        outputs = np.argmax(logits, axis=1)
    return logits, outputs[0]
if __name__ == "__main__":
    # Smoke test: score one (question span, KB path) pair.
    paraphrase_label = process(line_a='river end where originates Lake Itasca', line_b='outflow lake containedby partially location')
    print(paraphrase_label)
| 2,998 | 51.614035 | 133 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert_interface/redundancy_span_interface.py | import torch
from tqdm import tqdm
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from parsing.models.pytorch_pretrained_bert.tokenization import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForQuestionAnswering
from parsing.models.fine_tuning_based_on_bert.run_redundancy_span import read_one_example, \
convert_examples_to_features, RawResult, write_span_headwords_with_nbest
from parsing.models import model_utils
from parsing.parsing_args import bert_args
# Module-level one-time setup: load the fine-tuned redundancy-span (QA-style
# span extraction) model used by simple_process().
model_file = bert_args.fine_tuning_redundancy_span_D_model
args = model_utils.run_redundancy_span_get_local_args()
tokenizer = BertTokenizer.from_pretrained(args.bert_model)
# Load the fine-tuned weights on CPU first, then move the model to `device`.
model_state_dict = torch.load(model_file, map_location='cpu')
model = BertForQuestionAnswering.from_pretrained(args.bert_model, state_dict=model_state_dict)
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
model.to(device)
def simple_process(sequence):
    '''Predict the redundant span of one sequence (e.g. a question).

    Returns (span, nbest_json): the best-scoring span plus the n-best list
    produced by write_span_headwords_with_nbest.'''
    eval_examples = read_one_example(one_line=sequence)
    eval_features = convert_examples_to_features(examples=eval_examples, tokenizer=tokenizer,
        max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=False)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    # Index of each feature, used to map batch rows back to eval_features.
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
    model.eval()
    all_results = []
    for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            batch_start_logits, batch_end_logits = model(input_ids=input_ids,
                                                         token_type_ids=segment_ids,
                                                         attention_mask=input_mask)
        # Collect one RawResult per feature, keyed by its unique_id so the
        # n-best writer can realign results with eval_features.
        for i, example_index in enumerate(example_indices):
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits))
    span, nbest_json = write_span_headwords_with_nbest(
        eval_examples, eval_features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, args.verbose_logging)
    return span, nbest_json
| 3,128 | 58.037736 | 134 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert_interface/sequences_classifier_interface.py | import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from parsing.models import model_utils
from parsing.models.fine_tuning_based_on_bert.run_sequence_classifier import SequencesRelationProcess, ParaphraseProcess,\
SimplificationQuestionProcessor, convert_examples_to_features
from parsing.models.pytorch_pretrained_bert import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForSequenceClassification
from parsing.parsing_args import bert_args
# Module-level one-time setup: pick one sequence-classification task (by
# editing task_name below) and load its fine-tuned model for process().
num_labels_task = {"sequences_relation":7, "simplification":2, "paraphrase":2}
# "path_match": PathMatchProcess, "path_match":2 ,
processors = {"sequences_relation": SequencesRelationProcess, "simplification": SimplificationQuestionProcessor, "paraphrase": ParaphraseProcess}
task_name = "sequences_relation"
# task_name = "path_match"
# task_name = "simplification"
# task_name = "paraphrase"
args = model_utils.run_sequence_classifier_get_local_args()
processor = processors[task_name]()
label_list = processor.get_labels()
num_labels = num_labels_task[task_name]
tokenizer = BertTokenizer.from_pretrained(bert_args.bert_base_cased_tokenization, do_lower_case=args.do_lower_case)
# label_ids_map = {label: i for i, label in enumerate(label_list)}
# Map predicted class index back to its label string.
ids_label_map = {i: label for i, label in enumerate(label_list)}
bert_fine_tuning_filepath = None
# NOTE(review): the 'path_match'/'paraphrase' branches reference bert_args
# attributes not set by the BertArgs class visible elsewhere in this repo, and
# an unknown task_name falls through with filepath still None, which makes the
# torch.load below fail -- confirm before switching task_name.
if task_name == 'path_match':
    bert_fine_tuning_filepath = bert_args.fine_tuning_path_match_classifier_model
elif task_name == 'paraphrase':
    bert_fine_tuning_filepath = bert_args.fine_tuning_paraphrase_classifier_model
elif task_name == 'sequences_relation':
    bert_fine_tuning_filepath = bert_args.fine_tuning_relation_classifier_E_model
elif task_name == 'simplification':
    bert_fine_tuning_filepath = bert_args.fine_tuning_sequence_classifier_B_model
else:
    pass
# Load the fine-tuned weights on CPU first, then move the model to `device`.
model_state_dict = torch.load(bert_fine_tuning_filepath, map_location='cpu')
model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
model.to(device)
def process(line_a, line_b=None):
    """Classify the given sequence(s) with the fine-tuned model of the
    configured task; return the predicted label (a string from the task's
    label list)."""
    examples = processor.get_simple_examples(line_a=line_a, line_b=line_b)
    features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer)
    dataset = TensorDataset(
        torch.tensor([f.input_ids for f in features], dtype=torch.long),
        torch.tensor([f.input_mask for f in features], dtype=torch.long),
        torch.tensor([f.segment_ids for f in features], dtype=torch.long),
        torch.tensor([f.label_id for f in features], dtype=torch.long))
    loader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=args.eval_batch_size)
    model.eval()
    outputs = []
    for input_ids, input_mask, segment_ids, label_ids in loader:
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids, segment_ids, input_mask)
        # Argmax over classes; inputs here fit a single batch.
        outputs = np.argmax(logits.detach().cpu().numpy(), axis=1)
    return ids_label_map[outputs[0]]
if __name__ == "__main__":
    # Smoke test for the active task; examples for the other tasks are kept
    # below, commented out.
    relation_classifier_label = process(line_a='what was disability ?', line_b='fdr \'s')
    print(relation_classifier_label)
    # classifier_label = process(line_a='What do the people who worship at Titular Church call their God ?')
    # print(classifier_label)
    # path_match_classifier_label = process(line_a='involved terrorist organizations', line_b='organizations involved terrorist organization')
    # print(int(path_match_classifier_label))
    # paraphrase_label = process(line_a='river end where originates Lake Itasca', line_b='outflow lake containedby partially location')
    # print(paraphrase_label)
| 4,410 | 52.792683 | 145 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert_interface/headword_span_interface.py | import torch
from tqdm import tqdm
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from parsing.models.pytorch_pretrained_bert.tokenization import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForQuestionAnswering
from parsing.models.fine_tuning_based_on_bert.run_headword_span \
import read_one_example, convert_examples_to_features, RawResult, write_span_headwords_with_nbest
from parsing.models import model_utils
from parsing.parsing_args import bert_args
# Module-level one-time setup: load the fine-tuned headword-span (QA-style)
# model used by simple_process().
args = model_utils.run_redundancy_span_get_local_args()
model_file = bert_args.fine_tuning_headword_squad_F_model
tokenizer = BertTokenizer.from_pretrained(args.bert_model)
# Load the fine-tuned weights on CPU first, then move the model to `device`.
model_state_dict = torch.load(model_file, map_location='cpu')
model = BertForQuestionAnswering.from_pretrained(args.bert_model, state_dict=model_state_dict)
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
model.to(device)
def simple_process(question, span):
    '''Find, inside `question` (used as the QA paragraph), the headword for
    the phrase `span` (used as the QA question).

    Returns (span, nbest_json): the best prediction plus the n-best list from
    write_span_headwords_with_nbest.'''
    eval_examples = read_one_example(paragraph=question, question=span)
    eval_features = convert_examples_to_features(examples=eval_examples, tokenizer=tokenizer,
                                                 max_seq_length=args.max_seq_length, doc_stride=args.doc_stride,
                                                 max_query_length=args.max_query_length, is_training=False)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    # Index of each feature, used to map batch rows back to eval_features.
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
    # Run prediction for full data
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
    model.eval()
    all_results = []
    # logger.info("Start evaluating")
    for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            batch_start_logits, batch_end_logits = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
        # Collect one RawResult per feature, keyed by its unique_id so the
        # n-best writer can realign results with eval_features.
        for i, example_index in enumerate(example_indices):
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits))
    span, nbest_json = write_span_headwords_with_nbest(eval_examples, eval_features, all_results,
                                                      args.n_best_size, args.max_answer_length, args.do_lower_case, args.verbose_logging)
    return span, nbest_json
if __name__ == "__main__":
    # Smoke test: locate the headword of one span within an example question.
    span, nbest_json = simple_process(question='What is the language spoken in Switzerland that is mostly spoken in Italy ?', span='in Italy')
    print (span)
    # print (nbest_json) [OrderedDict([('text', 'owner'), ('text_index', 3), ('probability', 0.7110507253704911),
    # ('start_logit', -7.255896091461182), ('end_logit', -6.328417778015137)]),
    # OrderedDict([('text', 'station'), ('text_index', 6), ('probability', 0.22272930211485184),
| 3,612 | 60.237288 | 142 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert_interface/joint_three_models_interface.py | import torch
from tqdm import tqdm
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from parsing.models.pytorch_pretrained_bert.tokenization import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForSpanWithHeadwordWithLabel
from parsing.models import model_utils
from parsing.models.fine_tuning_based_on_bert.run_joint_three_models import read_one_example, \
convert_examples_to_features, RawResult, write_span_headwords_with_nbest
from parsing.parsing_args import bert_args
# Module-level one-time setup: load the joint span/headword/label model used
# by simple_process(). labels_list holds the dependency-relation classes.
labels_list = ["nmod", "conj", "acl:cl", "acl", "nmod:poss", "advcl", "xcomp"]
# Map predicted class index back to its relation label.
ids_label_map = {i: label for i, label in enumerate(labels_list)}
num_labels = len(labels_list)
args = model_utils.run_joint_three_models_get_local_args()
# NOTE(review): bert_args must define fine_tuning_joint_threemodels_A_model;
# verify against the active BertArgs configuration.
model_file = bert_args.fine_tuning_joint_threemodels_A_model
tokenizer = BertTokenizer.from_pretrained(args.bert_model)
# Load the fine-tuned weights on CPU first, then move the model to `device`.
model_state_dict = torch.load(model_file, map_location='cpu')
model = BertForSpanWithHeadwordWithLabel.from_pretrained(args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
model.to(device)
def simple_process(paragraph):
    '''Run the joint three-task model on one sequence (e.g. a question).

    Returns (span, headword, relation_label, nbest_json), where relation_label
    is the string from labels_list picked by the predicted label id.'''
    eval_examples = read_one_example(paragraph)
    eval_features = convert_examples_to_features(
        examples=eval_examples, label_list=labels_list,
        tokenizer=tokenizer, max_seq_length=args.max_seq_length,
        doc_stride=args.doc_stride, max_query_length=args.max_query_length,
        is_training=False)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    # Index of each feature, used to map batch rows back to eval_features.
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
    model.eval()
    all_results = []
    for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        # The joint model emits four logit tensors: span start, span end,
        # headword position, and relation label.
        with torch.no_grad():
            batch_start_logits, batch_end_logits, batch_headword_logits, batch_label_logits \
                = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
        for i, example_index in enumerate(example_indices):
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            headword_logits = batch_headword_logits[i].detach().cpu().tolist()
            label_logits = batch_label_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            # print('#token to orig map:\t', eval_feature.token_to_orig_map)
            # print('#tokens:\t', eval_feature.tokens)
            # print('#doc token:\t', eval_feature.doc_tokens)
            all_results.append(RawResult(
                unique_id=unique_id, start_logits=start_logits, end_logits=end_logits, headword_logits=headword_logits, label_logits=label_logits))
    span, headword, label_id, nbest_json = write_span_headwords_with_nbest(
        eval_examples, eval_features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, args.verbose_logging)
    return span, headword, ids_label_map[int(label_id)], nbest_json
if __name__ == "__main__":
    # Smoke test; note the first assignment is immediately overwritten, so
    # only the second question is actually processed.
    question = 'In which city is the headquarter of Air China ?'
    question = 'What Greek Mythology movie does Logan Lerman play in ?'
    span, headword, label_id, nbest_json = simple_process(paragraph=question)
    print (question)
    print (span) #of Air China
    print (headword) #5
    print (label_id) #nmod
| 4,168 | 56.902778 | 147 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert/run_sequence_classifier.py | """BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import random
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import sys
import os
#-----------------------------------
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
os.environ['CUDA_VISIBLE_DEVICES']='0,1,2'
#------------------------------------------------
from parsing.models.pytorch_pretrained_bert import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForSequenceClassification
from parsing.models.pytorch_pretrained_bert.optimization import BertAdam
from parsing.models.pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from parsing.models import model_utils
###############################################
class InputExample(object):
    """One sequence-classification example: a unique id, one or two raw text
    segments, and an optional gold label."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the example fields.

        Args:
            guid: unique id for the example.
            text_a: untokenized first sequence; the only text for
                single-sequence tasks.
            text_b: untokenized second sequence, required only for
                sequence-pair tasks.
            label: gold label for train/dev examples; None for test examples.
        """
        (self.guid, self.text_a, self.text_b, self.label) = (guid, text_a, text_b, label)
class InputFeatures(object):
    """Padded, id-encoded representation of one example, ready for the model."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids      # wordpiece ids, padded to max_seq_length
        self.input_mask = input_mask    # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids  # 0 for sequence A, 1 for sequence B
        self.label_id = label_id        # integer index of the gold label
###############################################
class DataProcessor(object):
    """Abstract base for task-specific readers of sequence-classification data."""

    def get_train_examples(self, data_dir):
        """Return the list of `InputExample`s for the training split."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Return the list of `InputExample`s for the dev split."""
        raise NotImplementedError()

    def get_labels(self):
        """Return the ordered list of label strings for this task."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of rows (each a list of fields)."""
        with open(input_file, "r", encoding='utf-8') as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class SimplificationQuestionProcessor(DataProcessor):
    """Reader for the question-simplification (binary) data set."""

    def get_train_examples(self, data_dir):
        """Build training examples from ``train.tsv`` under *data_dir*."""
        rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(rows, "train")

    def get_dev_examples(self, data_dir):
        """Build dev examples from ``dev.tsv`` under *data_dir*."""
        rows = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(rows, "dev")

    def get_labels(self):
        """Two-way classification: the label strings are "0" and "1"."""
        return ["0", "1"]

    def get_simple_examples(self, line_a, line_b=None):
        """Wrap a single raw sentence as a one-element test example list."""
        return self._create_examples_simple(line_a, "test")

    def _create_examples_simple(self, line, set_type):
        """Create one example from a raw sentence; the label is a placeholder."""
        example = InputExample(guid="%s-%s" % (set_type, 0), text_a=line, text_b=None, label='1')
        return [example]

    def _create_examples(self, lines, set_type):
        """Turn tsv rows of (sentence, label) into `InputExample`s."""
        return [
            InputExample(guid="%s-%s" % (set_type, i), text_a=row[0], text_b=None, label=row[1])
            for i, row in enumerate(lines)
        ]
class SequencesRelationProcess(DataProcessor):
    """Reader for the two-sequence dependency-relation classification data set."""

    def get_train_examples(self, data_dir):
        """Build training examples from ``train.tsv`` under *data_dir*."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """Build dev examples from ``dev.tsv`` under *data_dir*."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """The seven dependency-relation labels used by this task."""
        return ["nmod", "conj", "acl:cl", "acl", "nmod:poss", "advcl", "xcomp"]

    def get_simple_examples(self, line_a, line_b):
        """Wrap one (sequence, sequence) pair as a single test example."""
        return self._create_examples_simple(line_a, line_b, "test")

    def _create_examples_simple(self, line_a, line_b, set_type):
        """Create one pair example; 'nmod' is only a placeholder label."""
        example = InputExample(guid="%s-%s" % (set_type, 0), text_a=line_a, text_b=line_b, label='nmod')
        return [example]

    def _create_examples(self, lines, set_type):
        """Turn tsv rows into pair examples.

        Column layout: text_a in column 0, text_b in column 1, and the gold
        relation label in column 3 (column 2 is skipped).
        """
        return [
            InputExample(guid="%s-%s" % (set_type, i), text_a=row[0], text_b=row[1], label=row[3])
            for i, row in enumerate(lines)
        ]
class ParaphraseProcess(DataProcessor):
    """Reader for the binary paraphrase classification data set."""

    def get_train_examples(self, data_dir):
        """Build training examples from ``train.tsv`` under *data_dir*."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """Build dev examples from ``dev.tsv`` under *data_dir*."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """Two-way classification: the label strings are "0" and "1"."""
        return ["0", "1"]

    def get_simple_examples(self, line_a, line_b):
        """Wrap one sentence pair as a single test example."""
        return self._create_examples_simple(line_a, line_b, "test")

    def _create_examples_simple(self, line_a, line_b, set_type):
        """Create one pair example; '0' is only a placeholder label."""
        example = InputExample(guid="%s-%s" % (set_type, 0), text_a=line_a, text_b=line_b, label='0')
        return [example]

    def _create_examples(self, lines, set_type):
        """Turn tsv rows of (sentence_a, sentence_b, label) into examples."""
        return [
            InputExample(guid="%s-%s" % (set_type, i), text_a=row[0], text_b=row[1], label=row[2])
            for i, row in enumerate(lines)
        ]
'''
class PathMatchProcess(DataProcessor):
"""Processor for the path match classifier data set"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["1", "0"]
def get_simple_examples(self, line_a, line_b):
"""See base class."""
return self._create_examples_simple(line_a, line_b, "test")
def _create_examples_simple(self, line_a, line_b, set_type):
"""Creates examples for the training and dev sets."""
examples = []
guid = "%s-%s" % (set_type, 0)
text_a = line_a
text_b = line_b
label = '0'
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets.
what is the name of the director of computer ? apricot compound
"""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[0]
text_b = line[1]
label = line[2]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
'''
###############################################
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Convert `InputExample`s into padded, BERT-ready `InputFeatures`.

    Tokens are arranged as ``[CLS] a... [SEP]`` for single sequences and
    ``[CLS] a... [SEP] b... [SEP]`` for pairs.  Segment ids are 0 for the
    first sequence (including [CLS] and the first [SEP]) and 1 for the
    second; the classifier later reads the [CLS] vector.  Everything is
    zero-padded to *max_seq_length*, with input_mask marking real tokens.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for example in examples:
        first_tokens = tokenizer.tokenize(example.text_a)
        second_tokens = tokenizer.tokenize(example.text_b) if example.text_b else None
        if second_tokens is not None:
            # Shrink the pair in place so that, with [CLS]/[SEP]/[SEP] added,
            # the total length fits within max_seq_length.
            model_utils._truncate_seq_pair(first_tokens, second_tokens, max_seq_length - 3)
        elif len(first_tokens) > max_seq_length - 2:
            # Single sequence: leave room for [CLS] and [SEP] only.
            first_tokens = first_tokens[:max_seq_length - 2]
        tokens = ["[CLS]"] + first_tokens + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        if second_tokens:
            tokens = tokens + second_tokens + ["[SEP]"]
            segment_ids = segment_ids + [1] * (len(second_tokens) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Real tokens get mask 1; padding positions get mask 0 and id 0 so
        # the model never attends to them.
        input_mask = [1] * len(input_ids)
        pad_len = max_seq_length - len(input_ids)
        input_ids = input_ids + [0] * pad_len
        input_mask = input_mask + [0] * pad_len
        segment_ids = segment_ids + [0] * pad_len
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        features.append(InputFeatures(
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            label_id=label_map[example.label]))
    return features
###############################################
def main(args=None):
    """Fine-tune and/or evaluate a BERT sequence classifier.

    *args* carries the usual BERT fine-tuning flags (data_dir, bert_model,
    task_name, output_dir, do_train/do_eval, batch sizes, fp16 and
    distributed settings, ...); when None they are parsed from the command
    line via model_utils.run_sequence_classifier_get_local_args().
    """
    if args is None:
        args = model_utils.run_sequence_classifier_get_local_args()
    # Map task name -> processor class.  The number of labels is derived from
    # each processor's get_labels() below, so the two can never disagree.
    processors = {
        "simplification": SimplificationQuestionProcessor,
        "sequences_relation": SequencesRelationProcess,
        "paraphrase": ParaphraseProcess
    }
    # ---- device / distributed setup -------------------------------------
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which takes care of
        # synchronizing nodes/GPUs.
        torch.distributed.init_process_group(backend='nccl')
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(args.gradient_accumulation_steps))
    # Keep the effective optimizer batch constant: smaller forward batches,
    # gradients accumulated over several steps.
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
    # ---- reproducibility -------------------------------------------------
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)
    # ---- task selection --------------------------------------------------
    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    processor = processors[task_name]()
    label_list = processor.get_labels()
    # BUG FIX: the label count used to come from a hand-maintained
    # num_labels_task dict that listed 6 for "sequences_relation" although
    # its processor returns 7 labels; deriving the count from the label list
    # keeps the classifier head consistent with the data.
    num_labels = len(label_list)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
    # ---- model -----------------------------------------------------------
    model = BertForSequenceClassification.from_pretrained(
        args.bert_model,
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
        num_labels=num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # ---- optimizer -------------------------------------------------------
    # No weight decay on biases and LayerNorm parameters (standard BERT setup).
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    # BUG FIX: with --do_eval only, num_train_steps is None; fall back to -1
    # (BertAdam's "no fixed schedule" value) instead of passing None through.
    t_total = num_train_steps if num_train_steps is not None else -1
    if args.local_rank != -1 and t_total > 0:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)
    # ---- training --------------------------------------------------------
    global_step = 0
    tr_loss, nb_tr_steps = 0, 0  # defined up front so eval can report them safely
    if args.do_train:
        train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # average over the per-GPU losses
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # Manual linear warmup schedule (BERT-style).
                    lr_this_step = args.learning_rate * model_utils.warmup_linear(global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
    # ---- persist and reload the (possibly fine-tuned) model --------------
    model_to_save = model.module if hasattr(model, 'module') else model  # unwrap DataParallel/DDP
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    torch.save(model_to_save.state_dict(), output_model_file)
    model_state_dict = torch.load(output_model_file)
    model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
    model.to(device)
    # ---- evaluation ------------------------------------------------------
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Deterministic order over the full dev set.
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)
            with torch.no_grad():
                logits = model(input_ids, segment_ids, input_mask)
            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = model_utils.sequence_classifier_accuracy(logits, label_ids)
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'global_step': global_step}
        # BUG FIX: the training loss used to be reported unconditionally,
        # raising a NameError when running with --do_eval but not --do_train.
        if nb_tr_steps:
            result['loss'] = tr_loss / nb_tr_steps
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(result.keys()):
                writer.write("%s = %s\n" % (key, str(result[key])))
# Allow the module to be executed directly as a fine-tuning script.
if __name__ == "__main__":
    main()
#########################################################
# class MrpcProcessor(DataProcessor):
# """Processor for the MRPC data set (GLUE version)."""
#
# def get_train_examples(self, data_dir):
# """See base class."""
# # logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
# return self._create_examples(
# self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
#
# def get_dev_examples(self, data_dir):
# """See base class."""
# return self._create_examples(
# self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
#
# def get_labels(self):
# """See base class."""
# return ["0", "1"]
#
# def _create_examples(self, lines, set_type):
# """Creates examples for the training and dev sets."""
# examples = []
# for (i, line) in enumerate(lines):
# if i == 0:
# continue
# guid = "%s-%s" % (set_type, i)
# text_a = line[3]
# text_b = line[4]
# label = line[0]
# examples.append(
# InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
# return examples
# class MnliProcessor(DataProcessor):
# """Processor for the MultiNLI data set (GLUE version)."""
#
# def get_train_examples(self, data_dir):
# """See base class."""
# return self._create_examples(
# self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
#
# def get_dev_examples(self, data_dir):
# """See base class."""
# return self._create_examples(
# self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
# "dev_matched")
#
# def get_labels(self):
# """See base class."""
# return ["contradiction", "entailment", "neutral"]
#
# def _create_examples(self, lines, set_type):
# """Creates examples for the training and dev sets."""
# examples = []
# for (i, line) in enumerate(lines):
# if i == 0:
# continue
# guid = "%s-%s" % (set_type, line[0])
# text_a = line[8]
# text_b = line[9]
# label = line[-1]
# examples.append(
# InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
# return examples
# class ColaProcessor(DataProcessor):
# """Processor for the CoLA data set (GLUE version)."""
#
# def get_train_examples(self, data_dir):
# """See base class."""
# return self._create_examples(
# self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
#
# def get_dev_examples(self, data_dir):
# """See base class."""
# return self._create_examples(
# self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
#
# def get_labels(self):
# """See base class."""
# return ["0", "1"]
#
# def _create_examples(self, lines, set_type):
# """Creates examples for the training and dev sets."""
# examples = []
# for (i, line) in enumerate(lines):
# guid = "%s-%s" % (set_type, i)
# text_a = line[3]
# label = line[1]
# examples.append(
# InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
# return examples
# class HeadwordDirectionProcess(DataProcessor):
# """Processor for the sequences relation classifier data set"""
#
# def get_train_examples(self, data_dir):
# """See base class."""
# return self._create_examples(
# self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
#
# def get_dev_examples(self, data_dir):
# """See base class."""
# return self._create_examples(
# self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
#
# def get_labels(self):
# """See base class."""
# return ["left", "right"]
#
# def get_simple_examples(self, line_a, line_b):
# """See base class."""
# return self._create_examples_simple(line_a, line_b, "test")
#
# def _create_examples_simple(self, line_a, line_b, set_type):
# """Creates examples for the training and dev sets."""
# examples = []
# guid = "%s-%s" % (set_type, 0)
# text_a = line_a
# text_b = line_b
# label = 'left'
# examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
# return examples
#
# def _create_examples(self, lines, set_type):
# """Creates examples for the training and dev sets.
# what is the name of the director of computer ? apricot compound
# """
# examples = []
# for (i, line) in enumerate(lines):
# guid = "%s-%s" % (set_type, i)
# text_a = line[0]
# text_b = line[1]
# label = line[2]
# examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
# return examples
| 28,072 | 40.9 | 134 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert/run_redundancy_span.py | import torch
import collections
import random
import numpy as np
import pickle
import json
import sys
import os
#---------------------------------
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
#---------------------------------
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
os.environ['CUDA_VISIBLE_DEVICES']='1'
##################################################
from parsing.models.pytorch_pretrained_bert.tokenization import whitespace_tokenize, BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForQuestionAnswering
from parsing.models.pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from parsing.models.pytorch_pretrained_bert.optimization import BertAdam
from parsing.models import model_utils
from parsing.models.fine_tuning_based_on_bert import span_utils
#################################################
class SequenceExample(object):
    """A single training/test example for the span-extraction dataset."""

    def __init__(self, qas_id, doc_tokens, orig_answer_text=None, start_position=None, end_position=None):
        """Store one paragraph and (optionally) its gold answer span.

        Args:
            qas_id: unique example identifier.
            doc_tokens: whitespace-split words of the paragraph.
            orig_answer_text: gold answer span text (training only).
            start_position: word index of the span start (training only).
            end_position: word index of the span end, inclusive (training only).
        """
        self.qas_id = qas_id
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "qas_id: %s" % (self.qas_id)
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        # BUG FIX: the end_position line used to be gated on start_position,
        # and both truthiness checks silently dropped a legitimate position 0;
        # compare against None instead.
        if self.start_position is not None:
            s += ", start_position: %d" % (self.start_position)
        if self.end_position is not None:
            s += ", end_position: %d" % (self.end_position)
        return s
class InputFeatures(object):
    """One windowed, id-encoded feature derived from a `SequenceExample`."""

    def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map,
                 token_is_max_context, input_ids, input_mask, segment_ids, start_position=None, end_position=None):
        self.unique_id = unique_id                        # globally unique feature id
        self.example_index = example_index                # index of the source example
        self.doc_span_index = doc_span_index              # which sliding window this feature covers
        self.tokens = tokens                              # wordpiece tokens incl. [CLS]/[SEP]
        self.token_to_orig_map = token_to_orig_map        # feature token idx -> original word idx
        self.token_is_max_context = token_is_max_context  # True where this window is the token's best context
        self.input_ids = input_ids                        # wordpiece ids, padded
        self.input_mask = input_mask                      # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids                    # segment ids, padded
        self.start_position = start_position              # answer start within window (training only)
        self.end_position = end_position                  # answer end within window (training only)
##################################################
def read_many_examples(input_file, is_training):
    """Read span-extraction examples from a two-column file.

    Each line holds a paragraph and an answer span, e.g.
    ``who was the american in space ?`` / ``in space``.

    Args:
        input_file: path passed to span_utils.read_cols_lines.
        is_training: when True, answer start/end word positions are computed
            and lines whose answer cannot be located verbatim are skipped.

    Returns:
        list of `SequenceExample`s.
    """
    lines_list = span_utils.read_cols_lines(input_file=input_file)
    examples = []
    for i in range(len(lines_list)):
        paragraph_text = lines_list[i][0]
        answer_text = lines_list[i][1]
        # Split the paragraph into whitespace-delimited words and remember,
        # for every character position, which word it belongs to.
        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        for c in paragraph_text:
            if span_utils.is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens) - 1)
        qas_id = ('train_' if is_training else 'test_') + str(i)
        start_position = None
        end_position = None
        orig_answer_text = None
        if is_training:
            # BUG FIX: the old code raised when len(answer_text) != 1, which
            # rejects every answer longer than one character.  That check was
            # carried over from SQuAD code where it counted a *list* of
            # answers; here each line carries exactly one answer string, so
            # only require it to be non-empty.
            if not answer_text:
                raise ValueError("For training, each question should have exactly 1 answer.")
            orig_answer_text = answer_text
            answer_offset = paragraph_text.find(answer_text)
            if answer_offset == -1:
                # The answer does not occur verbatim in the paragraph; the
                # derived span positions would be meaningless, so skip it.
                continue
            answer_length = len(orig_answer_text)
            start_position = char_to_word_offset[answer_offset]
            end_position = char_to_word_offset[answer_offset + answer_length - 1]
            # Only keep answers that can be exactly recovered from the
            # document; mismatches are usually caused by odd Unicode.
            actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
            cleaned_answer_text = " ".join(whitespace_tokenize(orig_answer_text))
            if actual_text.find(cleaned_answer_text) == -1:
                continue
        else:
            orig_answer_text = answer_text
        examples.append(SequenceExample(
            qas_id=qas_id,
            doc_tokens=doc_tokens,
            orig_answer_text=orig_answer_text,
            start_position=start_position,
            end_position=end_position))
    return examples
def read_one_example(one_line):
    """Wrap one raw paragraph string as a one-element list of `SequenceExample`.

    The paragraph is split into whitespace-delimited words; no answer span is
    attached (this path is used only at prediction time).
    """
    words = []
    in_word = False
    for ch in one_line:
        if span_utils.is_whitespace(ch):
            in_word = False
        else:
            if in_word:
                words[-1] += ch
            else:
                words.append(ch)
            in_word = True
    return [SequenceExample(
        qas_id="test",
        doc_tokens=words,
        orig_answer_text=None,
        start_position=None,
        end_position=None)]
##################################################
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training):
    """Loads a data file into a list of `InputBatch`s.

    Each example's words are wordpiece-tokenized, then split into overlapping
    windows ("doc spans") of at most ``max_seq_length - 3`` tokens with a
    stride of ``doc_stride``, so long paragraphs yield several features.
    For training, the gold answer span is mapped from word positions to
    wordpiece positions and windows that do not contain it are dropped.
    ``max_query_length`` is accepted but not used here (there is no query
    prefix in this variant) -- kept for interface compatibility.
    """
    unique_id = 1000000000
    features = []
    for (example_index, example) in enumerate(examples):
        tok_to_orig_index = [] # wordpiece index -> original word index
        orig_to_tok_index = [] # original word index -> its first wordpiece index
        all_doc_tokens = [] # all wordpiece tokens of the paragraph
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if is_training:
            # Map the answer's word-level span onto wordpiece positions, then
            # tighten it so it matches the annotated text as closely as possible.
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = span_utils._improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text)
        # The -3 accounts for [CLS], [SEP] and [SEP].  The "- 0" stands where
        # SQuAD code subtracts len(query_tokens); there is no query here.
        max_tokens_for_doc = max_seq_length - 0 - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        # pylint: disable=invalid-name
        _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            # NOTE(review): segment id 1 is used for every real position,
            # including [CLS]; standard BERT uses 0 for the first segment --
            # presumably intentional, verify against the trained checkpoint.
            segment_ids.append(1)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i # absolute wordpiece index
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                # Record whether this window gives the token its best (most
                # central) context among all overlapping windows.
                is_max_context = span_utils._check_is_max_context(doc_spans, doc_span_index, split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            start_position = None
            end_position = None
            if is_training:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                # NOTE(review): example.start/end_position are *word*-level
                # indices compared against *wordpiece*-level window bounds;
                # SQuAD reference code uses tok_start_position/tok_end_position
                # here -- confirm this is intended.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                if (example.start_position < doc_start or example.end_position < doc_start or
                    example.start_position > doc_end or example.end_position > doc_end):
                    continue
                # Only [CLS] precedes the document window (SQuAD would use
                # len(query_tokens) + 2 here).
                doc_offset = 1
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset
            features.append(
                InputFeatures(
                    unique_id=unique_id,
                    example_index=example_index,
                    doc_span_index=doc_span_index,
                    tokens=tokens,
                    token_to_orig_map=token_to_orig_map,
                    token_is_max_context=token_is_max_context,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    start_position=start_position,
                    end_position=end_position))
            unique_id += 1
    return features
##################################################
def write_predictions(all_examples, all_features, all_results, n_best_size,
        max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, verbose_logging):
    """Select the best answer span per example and write prediction/accuracy JSON.

    For every example, the n-best start/end logit combinations of each of its
    features are filtered, de-tokenized, de-duplicated and ranked; the top
    prediction is compared (exact string match) against
    ``example.orig_answer_text`` to accumulate an accuracy score.

    Side effects:
        Writes ``output_prediction_file`` (per-question best prediction plus
        its 0/1 accuracy) and ``output_nbest_file`` (aggregate accuracy summary).
    """
    # Group features by the example they were generated from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    # pylint: disable=invalid-name
    _PrelimPrediction = collections.namedtuple("PrelimPrediction", ["feature_index", "start_index", "start_logit", "end_index", "end_logit"])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    eval_accuracy = 0
    instance_num = 0
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        correct_span = example.orig_answer_text
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            # Some features may have no model output; skip them.
            # (idiom fix: direct membership test instead of `.keys()`)
            if feature.unique_id not in unique_id_to_result:
                continue
            result = unique_id_to_result[feature.unique_id]
            start_indexes = span_utils._get_best_indexes(result.start_logits, n_best_size)
            end_indexes = span_utils._get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions: out-of-range indices, tokens outside the
                    # document mapping, non-max-context starts, reversed spans,
                    # over-long spans, and single-token spans (deliberately excluded).
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    if length == 1:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
        # pylint: disable=invalid-name
        _NbestPrediction = collections.namedtuple("NbestPrediction", ["text", "start_logit", "end_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = span_utils.get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            if final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
        probs = span_utils._compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            # Exact-match accuracy against the gold answer text.
            output["accuracy"] = 1 if entry.text == correct_span else 0
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        all_predictions[example.qas_id] = (nbest_json[0]["text"], nbest_json[0]["accuracy"])
        eval_accuracy += nbest_json[0]["accuracy"]
        instance_num += 1
        all_nbest_json[example.qas_id] = nbest_json
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")
    # BUGFIX: guard against ZeroDivisionError when no examples were evaluated
    # (the original crashed here on an empty prediction set).
    result = eval_accuracy / instance_num if instance_num else 0.0
    print("#result:\t", result)
    result_json = collections.OrderedDict()
    result_json['result'] = result
    result_json['eval_accuracy'] = eval_accuracy
    result_json['instance_num'] = instance_num
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(result_json, indent=4) + "\n")
def write_span_headwords_with_nbest(all_examples, all_features, all_results, n_best_size,
        max_answer_length, do_lower_case, verbose_logging):
    """Return the best predicted span and the full n-best list.

    Same candidate generation/filtering as ``write_predictions`` but without
    accuracy bookkeeping or file output. Note: if ``all_examples`` contains
    more than one example, only the LAST example's span/n-best is returned
    (the loop overwrites ``span``/``nbest_json`` each iteration).

    Returns:
        (span, nbest_json): best answer text and the ranked candidate list
        (each entry an OrderedDict with text/probability/start_logit/end_logit),
        or (None, None) when ``all_examples`` is empty.
    """
    span = None
    nbest_json = None
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    # pylint: disable=invalid-name
    _PrelimPrediction = collections.namedtuple(
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            # ROBUSTNESS/CONSISTENCY FIX: skip features without a model result,
            # matching write_predictions (previously a KeyError here).
            if feature.unique_id not in unique_id_to_result:
                continue
            result = unique_id_to_result[feature.unique_id]
            start_indexes = span_utils._get_best_indexes(result.start_logits, n_best_size)
            end_indexes = span_utils._get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions (same filters as write_predictions).
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    if length == 1:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
        # pylint: disable=invalid-name
        _NbestPrediction = collections.namedtuple("NbestPrediction", ["text", "start_logit", "end_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = span_utils.get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            if final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
        probs = span_utils._compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        span = nbest_json[0]["text"]
    return span, nbest_json
RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
def main(args=None):
    """End-to-end train/predict driver for the redundancy-span BERT QA model.

    Parses CLI args (unless supplied), sets up (optionally distributed / fp16)
    execution, fine-tunes ``BertForQuestionAnswering`` on ``args.train_file``,
    saves and reloads the weights, then optionally runs prediction on
    ``args.predict_file`` and writes results via ``write_predictions``.

    Args:
        args: pre-parsed argument namespace; when None the defaults from
            ``model_utils.run_redundancy_span_get_local_args()`` are used.

    Raises:
        ValueError: on invalid argument combinations or a non-empty output dir.
        ImportError: when fp16/distributed mode is requested without apex.
    """
    if args is None:
        args = model_utils.run_redundancy_span_get_local_args()
    # ---- device / distributed setup ----
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(args.gradient_accumulation_steps))
    # Effective per-step batch size after gradient accumulation.
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
    # ---- reproducibility ----
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    # ---- argument validation ----
    if not args.do_train and not args.do_predict:
        raise ValueError("At least one of `do_train` or `do_predict` must be True.")
    if args.do_train:
        if not args.train_file:
            raise ValueError("If `do_train` is True, then `train_file` must be specified.")
    if args.do_predict:
        if not args.predict_file:
            raise ValueError("If `do_predict` is True, then `predict_file` must be specified.")
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        # BUGFIX: the original message had an empty "()" placeholder — it never
        # interpolated the directory (the token-classifier script formats it).
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = read_many_examples(input_file=args.train_file, is_training=True)
        num_train_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
    # ---- model ----
    model = BertForQuestionAnswering.from_pretrained(args.bert_model,
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
    print(PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # ---- optimizer ----
    param_optimizer = list(model.named_parameters())
    # hack to remove pooler, which is not used
    # thus it produce None grad that break apex
    param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
    # No weight decay for bias and LayerNorm parameters.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)
    global_step = 0
    # ---- training ----
    if args.do_train:
        cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format(
            args.bert_model, str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length))
        train_features = None
        try:
            with open(cached_train_features_file, "rb") as reader:
                train_features = pickle.load(reader)
        except Exception:
            # Cache miss or unreadable cache: rebuild the features and
            # (on the main process) refresh the cache. (BUGFIX: was a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
            train_features = convert_examples_to_features(
                examples=train_examples,
                tokenizer=tokenizer,
                max_seq_length=args.max_seq_length,
                doc_stride=args.doc_stride,
                max_query_length=args.max_query_length,
                is_training=True)
            if args.local_rank == -1 or torch.distributed.get_rank() == 0:
                with open(cached_train_features_file, "wb") as writer:
                    pickle.dump(train_features, writer)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                if n_gpu == 1:
                    batch = tuple(t.to(device) for t in batch)  # multi-gpu does scattering it-self
                input_ids, input_mask, segment_ids, start_positions, end_positions = batch
                loss = model(input_ids=input_ids, token_type_ids=segment_ids,
                             attention_mask=input_mask, start_positions=start_positions, end_positions=end_positions)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    # NOTE(review): retain_graph=True looks unnecessary here and
                    # costs memory; kept as-is to preserve existing behavior.
                    loss.backward(retain_graph=True)
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * span_utils.warmup_linear(global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
        # Save a trained model
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        torch.save(model_to_save.state_dict(), output_model_file)
        # Load a trained model that you have fine-tuned
        model_state_dict = torch.load(output_model_file)
        model = BertForQuestionAnswering.from_pretrained(args.bert_model, state_dict=model_state_dict)
        model.to(device)
    # ---- prediction (main process only under distributed training) ----
    if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = read_many_examples(input_file=args.predict_file, is_training=False)
        eval_features = convert_examples_to_features(
            examples=eval_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=False)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
        model.eval()
        all_results = []
        for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            with torch.no_grad():
                batch_start_logits, batch_end_logits = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
            for i, example_index in enumerate(example_indices):
                start_logits = batch_start_logits[i].detach().cpu().tolist()
                end_logits = batch_end_logits[i].detach().cpu().tolist()
                eval_feature = eval_features[example_index.item()]
                all_results.append(RawResult(unique_id=int(eval_feature.unique_id), start_logits=start_logits, end_logits=end_logits))
        output_prediction_file = os.path.join(args.output_dir, "predictions.json")
        output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
        write_predictions(eval_examples, eval_features, all_results, args.n_best_size, args.max_answer_length,
                          args.do_lower_case, output_prediction_file, output_nbest_file, args.verbose_logging)
# Script entry point: run the full train/predict pipeline.
if __name__ == "__main__":
    main()
| 35,296 | 45.321522 | 141 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert/run_token_classifier.py | """BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import mmap
import csv
import os
import random
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
#-----------------------------------
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
os.environ['CUDA_VISIBLE_DEVICES']='2'
#------------------------------------------------
from parsing.models.pytorch_pretrained_bert.tokenization import BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForTokenClassification
from parsing.models.pytorch_pretrained_bert.optimization import BertAdam
from parsing.models.pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from parsing.models import model_utils
###############################################
class InputExample(object):
    """One (possibly unlabelled) sequence-classification sample.

    Attributes mirror the constructor arguments one-to-one.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the raw fields of a single example.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For
                single-sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second
                sequence. Only must be specified for sequence-pair tasks.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.label = label
        self.text_a = text_a
        self.text_b = text_b
class InputFeatures(object):
    """Tensor-ready representation of one example: ids, mask, segments, labels."""

    def __init__(self, input_ids, input_mask, segment_ids, label_ids):
        # Parallel lists; callers pad them all to the same max_seq_length.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_ids = label_ids
###############################################
class DataProcessor(object):
    """Base class for data converters for sequence-classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab-separated-value file into a list of row lists."""
        with open(input_file, "r", encoding='utf-8') as f:
            return [row for row in csv.reader(f, delimiter="\t", quotechar=quotechar)]

    @classmethod
    def _read_line_data(cls, input_file):
        """Read tab-separated '<question>TAB<tag sequence>' lines.

        Returns [[tags, question], ...] (fields swapped so the label string
        comes first, matching `_create_example`); lines without at least two
        tab-separated fields are dropped.
        """
        rows = []
        with open(input_file, 'r', encoding='utf-8') as f:
            # Memory-map the file and stream it line by line.
            mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            for raw in iter(mm.readline, b''):
                fields = raw.decode().replace('\r\n', '').replace('\n', '').split('\t')
                if len(fields) >= 2:
                    rows.append([fields[1], fields[0]])
            mm.close()
        return rows
class NodeRecogniationProcessor(DataProcessor):
    """Processor for the node-recognition (question token tagging) data set."""

    def get_sequence_example(self, sequence):
        """Wrap a raw question string as a single unlabelled test example."""
        return InputExample(guid="%s-%s" % ('test', 0), text_a=sequence, text_b=None, label=None)

    def get_train_examples(self, data_dir):
        """Load and convert `train.txt` from `data_dir`."""
        return self._create_example(self._read_line_data(os.path.join(data_dir, "train.txt")), "train")

    def get_dev_examples(self, data_dir):
        """Load and convert `dev.txt` from `data_dir`."""
        return self._create_example(self._read_line_data(os.path.join(data_dir, "dev.txt")), "dev")

    def get_test_examples(self, data_dir):
        """Load and convert `test.txt` from `data_dir`."""
        return self._create_example(self._read_line_data(os.path.join(data_dir, "test.txt")), "test")

    def get_labels(self):
        """Tag vocabulary: outside / WordPiece-continuation / special tokens
        plus the three node categories."""
        return ["O", "X", "[CLS]", "[SEP]", "class", "entity", "literal"]

    def _create_example(self, lines, set_type):
        """Turn [[label_string, text], ...] rows into `InputExample`s."""
        examples = []
        for idx, row in enumerate(lines):
            examples.append(InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=row[1],
                text_b=None,
                label=row[0]))
        return examples
###############################################
def convert_example_to_features_for_test(example, max_seq_length, tokenizer):
    """Convert one unlabelled example into a single padded `InputFeatures`.

    Returns:
        (features, new_labels_temp): a one-element list of InputFeatures
        (label_ids=None) and, for every kept WordPiece, a marker — the
        placeholder 0 for the first sub-token of each word, 'X' for
        continuations — bracketed by '[CLS]' / '[SEP]'.
    """
    wordpieces = []
    piece_markers = []
    for word in example.text_a.split(' '):
        sub_tokens = tokenizer.tokenize(word)  # e.g. '04/26/1882' -> ['04', '/', '26', '/', '1882']
        wordpieces.extend(sub_tokens)
        for pos in range(len(sub_tokens)):
            piece_markers.append(0 if pos == 0 else 'X')
    # Truncate to leave room for [CLS] and [SEP].
    if len(wordpieces) >= max_seq_length - 1:
        wordpieces = wordpieces[0:(max_seq_length - 2)]
        piece_markers = piece_markers[0:(max_seq_length - 2)]
    ntokens = ['[CLS]']
    new_labels_temp = ['[CLS]']
    segment_ids = [0]
    for piece, marker in zip(wordpieces, piece_markers):
        ntokens.append(piece)
        new_labels_temp.append(marker)
        segment_ids.append(0)
    ntokens.append('[SEP]')
    new_labels_temp.append('[SEP]')
    segment_ids.append(0)
    input_ids = tokenizer.convert_tokens_to_ids(ntokens)
    input_mask = [1] * len(input_ids)
    # Zero-pad up to max_seq_length; 'NULL' keeps ntokens aligned with the pads.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        ntokens.append('NULL')
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    features = [InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=None)]
    return features, new_labels_temp
def convert_examples_to_features_for_train(examples, label_list, max_seq_length, tokenizer):
    """Convert labelled `InputExample`s into padded `InputFeatures` for training.

    Args:
        examples: InputExamples whose `text_a` is space-separated words and
            whose `label` is a space-separated tag string, one tag per word.
        label_list: full tag vocabulary; its order defines the label ids.
        max_seq_length: fixed output length (including [CLS] and [SEP]).
        tokenizer: tokenizer providing `tokenize` / `convert_tokens_to_ids`.

    Returns:
        A list of InputFeatures, one per example.
    """
    label_map = {label: i for i, label in enumerate(label_list)}  # label -> id index
    features = []
    for (ex_index, example) in enumerate(examples):
        # IDIOM FIX: this local was previously also named `label_list`,
        # shadowing the vocabulary parameter above; renamed for clarity.
        example_labels = example.label.split(' ')
        tokens = []
        labels = []
        for i, word in enumerate(example.text_a.split(' ')):
            token_wordpiece = tokenizer.tokenize(word)
            tokens.extend(token_wordpiece)
            label_current = example_labels[i]
            # First WordPiece of a word keeps its tag; continuations get 'X'.
            for m in range(len(token_wordpiece)):
                if m == 0:
                    labels.append(label_current)
                else:
                    labels.append('X')
        # Truncate to leave room for [CLS] and [SEP].
        if len(tokens) >= max_seq_length - 1:
            tokens = tokens[0:(max_seq_length - 2)]
            labels = labels[0:(max_seq_length - 2)]
        ntokens = []
        segment_ids = []
        label_ids = []
        ntokens.append('[CLS]')
        segment_ids.append(0)
        label_ids.append(label_map['[CLS]'])
        for i, token in enumerate(tokens):
            ntokens.append(token)
            segment_ids.append(0)
            label_ids.append(label_map[labels[i]])
        ntokens.append('[SEP]')
        segment_ids.append(0)
        label_ids.append(label_map['[SEP]'])
        input_ids = tokenizer.convert_tokens_to_ids(ntokens)
        # 1 for real tokens, 0 for padding.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to max_seq_length; pads get label id 0 (masked out) and
        # 'NULL' keeps ntokens aligned with the padding positions.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            label_ids.append(0)
            ntokens.append('NULL')
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids))
    return features
###############################################
def main(args=None):
if args is None:
args = model_utils.run_token_classifier_get_local_args()
# task_name = 'ner'
# task_name = 'importantwords'
# task_name = 'headword'
task_name = 'node_recognition'
processors = {
# "ner": NerProcessor,
# "importantwords": ImportantwordRecogniationProcessor,
# "headword": HeadwordProcessor
"node_recognition": NodeRecogniationProcessor,
}
num_labels_task = {
# "ner" : len(processor.get_labels()),
# "importantwords": len(processor.get_labels()),
# "headword": len(processor.get_labels())
"node_recognition": 7,
}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
# logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
# device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
# task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_steps = None
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
num_train_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
model = BertForTokenClassification.from_pretrained(
args.bert_model,
cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
num_labels=num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
global_step = 0
if args.do_train:
train_features = convert_examples_to_features_for_train(train_examples, label_list, args.max_seq_length, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * model_utils.warmup_linear(global_step/t_total, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Save a trained model
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
# Load a trained model that you have fine-tuned
model_state_dict = torch.load(output_model_file)
model = BertForTokenClassification.from_pretrained(args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features = convert_examples_to_features_for_train(eval_examples, label_list, args.max_seq_length, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids in eval_dataloader: #8*128
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
# tmp_eval_loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
logits = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
logits = logits.detach().cpu().numpy() #[batch_size, sequence_length, num_labels]. #2*128*label_num_size
label_ids = label_ids.to('cpu').numpy()
#---------------------------------------------
# outputs = np.argmax(logits, axis=2)
# print('#outputs:\t', outputs.shape)
# print('#label_ids:\t', label_ids.shape)
# [rows, cols] = outputs.shape
# print(rows, cols)
# for x_axis in range(rows):
# sequence_output = outputs[x_axis]
# labels = label_ids[x_axis]
# print(sequence_output, labels)
#---------------------------------------------
tmp_eval_accuracy = model_utils.token_classifier_accuracy(logits, label_ids)
# eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0) * input_ids.size(1)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': tr_loss/nb_tr_steps}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("%s = %s\n" % (key, str(result[key])))
# Script entry point: run the full train/eval pipeline defined in main().
if __name__ == "__main__":
    main()
| 21,450 | 42.867076 | 134 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert/run_headword_span.py | """Run BERT on SQuAD."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import random
import pickle
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
#-----------------------------------
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath) #'D:\\PycharmProjects\\kbcqa\\models']
#------------------------------------------------
from parsing.models.pytorch_pretrained_bert.tokenization import whitespace_tokenize, BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForQuestionAnswering
from parsing.models.pytorch_pretrained_bert.optimization import BertAdam
from parsing.models.pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from parsing.models.fine_tuning_based_on_bert import span_utils
from parsing.models import model_utils
#------------------------------------------------
class SquadExample(object):
    """A single training/test example for the Squad dataset.

    Holds one (question, passage) pair plus, for training examples, the
    gold answer span expressed as word-level start/end indices into
    ``doc_tokens``.
    """
    def __init__(self, qas_id, question_text, doc_tokens, doc_char_to_word_offset=None,
                 orig_answer_text=None, start_position=None, end_position=None):
        self.qas_id = qas_id                      # unique example id (qid)
        self.question_text = question_text        # question string
        self.doc_tokens = doc_tokens              # passage split into whitespace words
        self.doc_char_to_word_offset = doc_char_to_word_offset  # char index -> word index
        self.orig_answer_text = orig_answer_text  # answer string (None at prediction time)
        self.start_position = start_position      # answer start word index
        self.end_position = end_position          # answer end word index

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "qas_id: %s" % (self.qas_id)
        s += ", question_text: %s" % (self.question_text)
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        # Bug fix: the original guarded both lines on ``self.start_position``
        # (so end_position was never checked) and used truthiness, which
        # silently dropped a legitimate answer position of 0.
        if self.start_position is not None:
            s += ", start_position: %d" % (self.start_position)
        if self.end_position is not None:
            s += ", end_position: %d" % (self.end_position)
        return s
class InputFeatures(object):
    """A single set of features of data.

    One BERT-ready window over an example: wordpiece token ids, attention
    mask, segment ids, and bookkeeping maps back to the original document
    tokens.
    """

    def __init__(self, unique_id, example_index, doc_span_index, tokens,
                 token_to_orig_map, token_is_max_context, input_ids,
                 input_mask, segment_ids, start_position=None, end_position=None):
        # Identifiers tying this feature back to its source example/window.
        self.unique_id = unique_id
        self.example_index = example_index
        self.doc_span_index = doc_span_index
        # Tokenization bookkeeping (wordpiece tokens and maps to original words).
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        # Model inputs.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        # Supervision targets (None at prediction time).
        self.start_position = start_position
        self.end_position = end_position
#########################################################################
def read_many_examples(input_file, is_training):
    '''2019.06.19

    Read tab-separated (passage, question, headword) rows and build
    SquadExample objects. Training rows whose answer span cannot be
    recovered exactly from the tokenized passage are skipped.
    '''
    examples = []
    rows = span_utils.read_cols_lines(input_file=input_file)
    for idx, row in enumerate(rows):
        paragraph_text = row[0]
        question_text = row[1]
        answer_text = row[2]
        # Split the passage on whitespace while recording, for every
        # character, the index of the word it falls inside.
        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        for ch in paragraph_text:
            if span_utils.is_whitespace(ch):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(ch)
                else:
                    doc_tokens[-1] += ch
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens) - 1)
        qas_id = ('train_' if is_training else 'test_') + str(idx)
        start_position = None
        end_position = None
        orig_answer_text = answer_text
        if is_training:
            if len(answer_text) == 0:
                raise ValueError('For training, each question should have exactly 1 answer.')
            # answer_offset = paragraph_text.find(answer_text)
            answer_offset = span_utils.duplicate_word(paragraph_text=paragraph_text, span=question_text, headword=answer_text)
            answer_length = len(orig_answer_text)
            start_position = char_to_word_offset[answer_offset]
            end_position = char_to_word_offset[answer_offset + answer_length - 1]
            # Only add answers where the text can be exactly recovered from
            # the document. If this CAN'T happen it's likely due to weird
            # Unicode stuff so we just skip the example; training examples
            # are therefore NOT guaranteed to all be preserved.
            actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
            cleaned_answer_text = " ".join(whitespace_tokenize(orig_answer_text))
            if actual_text.find(cleaned_answer_text) == -1:
                continue
        examples.append(SquadExample(
            qas_id=qas_id,
            question_text=question_text,
            doc_tokens=doc_tokens,
            doc_char_to_word_offset=char_to_word_offset,
            orig_answer_text=orig_answer_text,
            start_position=start_position,
            end_position=end_position))
    return examples
def read_one_example(paragraph, question):
    """Wrap a single (paragraph, question) pair as a one-element example list.

    Tokenizes the paragraph on whitespace and records a per-character map
    to word indices; positions/answer are left unset (prediction only).
    """
    def is_whitespace(c):
        # 0x202F is the narrow no-break space.
        return c in " \t\r\n" or ord(c) == 0x202F

    paragraph_text = paragraph.strip()
    doc_tokens = []
    char_to_word_offset = []
    prev_blank = True
    for ch in paragraph_text:
        if is_whitespace(ch):
            prev_blank = True
        else:
            if prev_blank:
                doc_tokens.append(ch)
            else:
                doc_tokens[-1] += ch
            prev_blank = False
        char_to_word_offset.append(len(doc_tokens) - 1)
    return [SquadExample(
        qas_id="test",
        question_text=question.strip(),
        doc_tokens=doc_tokens,
        doc_char_to_word_offset=char_to_word_offset,
        orig_answer_text=None,
        start_position=None,
        end_position=None)]
#########################################################################
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training):
    """Loads a data file into a list of `InputBatch`s.

    Each SquadExample is wordpiece-tokenized and, when the passage exceeds
    the budget, split into overlapping windows ("doc spans") advanced by
    `doc_stride` sub-tokens. One InputFeatures object is emitted per
    window; training windows that do not contain the gold answer span are
    dropped.

    Args:
        examples: list of SquadExample objects.
        tokenizer: BERT tokenizer (tokenize / convert_tokens_to_ids).
        max_seq_length: total input budget (query + doc chunk + specials).
        doc_stride: stride between consecutive doc windows.
        max_query_length: questions longer than this are truncated.
        is_training: when True, compute sub-token start/end positions.

    Returns:
        list of InputFeatures.
    """
    unique_id = 1000000000
    features = []
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        tok_to_orig_index = [] #token - orig word index
        orig_to_tok_index = [] #orig word - token index
        all_doc_tokens = [] #all tokens
        # Wordpiece-tokenize the passage, keeping a two-way mapping between
        # whitespace words and sub-tokens.
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if is_training:
            # Map the word-level answer span onto sub-token indices, then
            # tighten it with the project helper.
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = span_utils._improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text)
        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        # pylint: disable=invalid-name
        _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Assemble [CLS] query [SEP] doc_chunk [SEP] with segment ids 0/0/1.
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                # A token appearing in several windows is only "max context"
                # in the window where it has the most surrounding context.
                is_max_context = span_utils._check_is_max_context(doc_spans, doc_span_index,split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            start_position = None
            end_position = None
            if is_training:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                if (example.start_position < doc_start or example.end_position < doc_start or
                    example.start_position > doc_end or example.end_position > doc_end):
                    continue
                # Offset positions into the assembled sequence
                # (past [CLS] + query + [SEP]).
                doc_offset = len(query_tokens) + 2
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset
            features.append(
                InputFeatures(
                    unique_id=unique_id,
                    example_index=example_index,
                    doc_span_index=doc_span_index,
                    tokens=tokens,
                    token_to_orig_map=token_to_orig_map,
                    token_is_max_context=token_is_max_context,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    start_position=start_position,
                    end_position=end_position))
            unique_id += 1
    return features
#########################################################################
def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, verbose_logging):
    """Write final predictions to the json file.

    For every example, gathers the start/end logits of all its feature
    windows, keeps the n-best single-token candidates that lie outside the
    question span, and writes per-example (prediction, accuracy-vs-gold)
    plus an aggregate accuracy summary to the two output files.
    """
    # Group feature windows by the example they were derived from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    # pylint: disable=invalid-name
    _PrelimPrediction = collections.namedtuple(
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    eval_accuracy = 0
    instance_num = 0
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        correct_headword = example.orig_answer_text  # gold headword string
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            if feature.unique_id not in unique_id_to_result.keys(): continue
            result = unique_id_to_result[feature.unique_id]
            start_indexes = span_utils._get_best_indexes(result.start_logits, n_best_size)
            end_indexes = span_utils._get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        #heuristics: the author add the constraint that the end must come after the start
                        continue
                    if start_index != end_index: #length == 1
                        # Headwords are single tokens: keep only length-1 spans.
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        # Best candidates first, ranked by summed start+end logit.
        prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
        # pylint: disable=invalid-name
        _NbestPrediction = collections.namedtuple("NbestPrediction",
                                                  ["text", "text_index", "start_logit", "end_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            #######################
            # Resolve the predicted token to its original word index; skip
            # special tokens outright.
            headword_orig_index = " ".join(feature.tokens[pred.start_index: (pred.end_index + 1)])
            if headword_orig_index in ['[CLS]', '[SEP]']:
                continue
            if pred.start_index in feature.token_to_orig_map.keys():
                headword_orig_index = feature.token_to_orig_map[pred.start_index]
            ###########################################################
            # Locate the question span inside the passage and reject any
            # headword that falls within it (the headword must come from
            # outside the span being described).
            # NOTE(review): passage_text.find(question_str) returns -1 when
            # the question is not a substring of the passage, which would
            # index char_to_word_offset[-1] — confirm inputs guarantee
            # containment.
            passage_tokens = example.doc_tokens
            question_str = example.question_text
            passage_char_to_word_offset = example.doc_char_to_word_offset
            passage_text = ' '.join(passage_tokens)
            start_position_char = passage_text.find(question_str)
            end_position_char = start_position_char + len(question_str) - 1
            span_start_position_word = passage_char_to_word_offset[start_position_char]
            span_end_position_word = passage_char_to_word_offset[end_position_char]
            # print(span_start_position_word, span_end_position_word)
            if span_start_position_word <= headword_orig_index <= span_end_position_word:
                continue
            ###########################################################
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = span_utils.get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            if final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    text_index=headword_orig_index,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", text_index=0, start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        # Convert summed logits into probabilities over the n-best list.
        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
        probs = span_utils._compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["text_index"] = entry.text_index
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            if entry.text == correct_headword:
                output["accuracy"] = 1
            else:
                output["accuracy"] = 0
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        all_predictions[example.qas_id] = (nbest_json[0]["text"], nbest_json[0]["accuracy"])
        all_nbest_json[example.qas_id] = nbest_json
        eval_accuracy += nbest_json[0]["accuracy"]
        instance_num += 1
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")
    # with open(output_nbest_file, "w") as writer:
    #     writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
    # NOTE(review): raises ZeroDivisionError when instance_num == 0 —
    # confirm callers always pass at least one example.
    result = eval_accuracy / instance_num
    print("#result:\t", result)
    result_json = collections.OrderedDict()
    result_json['result'] = result
    result_json['eval_accuracy'] = eval_accuracy
    result_json['instance_num'] = instance_num
    with open(output_nbest_file, "w") as writer:
        # writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
        writer.write(json.dumps(result_json, indent=4) + "\n")
    # NOTE(review): duplicate of the "#result" print above — candidate for removal.
    print ("#result:\t", eval_accuracy / instance_num)
def write_span_headwords_with_nbest(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, verbose_logging):
    '''get span, headwords ,and nbest

    Mirrors write_predictions but, instead of writing files, returns the
    best headword word-index and the full n-best list. NOTE(review): the
    loop reassigns headword_index/nbest_json on every example and the
    return is outside the loop, so with multiple examples only the LAST
    example's result is returned — apparently intended for the
    single-example case (see read_one_example); confirm with callers.
    '''
    headword_index = None
    nbest_json = None
    # Group feature windows by the example they were derived from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    # pylint: disable=invalid-name
    _PrelimPrediction = collections.namedtuple(
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
    # all_predictions = collections.OrderedDict()
    # all_nbest_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            if feature.unique_id not in unique_id_to_result.keys(): continue
            result = unique_id_to_result[feature.unique_id]
            start_indexes = span_utils._get_best_indexes(result.start_logits, n_best_size)
            end_indexes = span_utils._get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    if start_index != end_index: #only one length
                        # Headwords are single tokens: keep only length-1 spans.
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        # Best candidates first, ranked by summed start+end logit.
        prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
        # pylint: disable=invalid-name
        _NbestPrediction = collections.namedtuple("NbestPrediction", ["text", "text_index", "start_logit", "end_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            # Resolve the predicted token to its original word index; skip
            # special tokens outright.
            headword_orig_index = " ".join(feature.tokens[pred.start_index: (pred.end_index + 1)])
            ###########################################################
            if headword_orig_index in ['[CLS]', '[SEP]']:
                continue
            if pred.start_index in feature.token_to_orig_map.keys():
                headword_orig_index = feature.token_to_orig_map[pred.start_index]
            ###########################################################
            # Locate the question span inside the passage and reject any
            # headword that falls within it.
            # NOTE(review): passage_text.find(question_str) can return -1
            # (indexing char_to_word_offset[-1]) if the question is not a
            # substring of the passage — confirm inputs guarantee containment.
            passage_tokens = example.doc_tokens
            question_str = example.question_text
            passage_char_to_word_offset = example.doc_char_to_word_offset
            passage_text = ' '.join(passage_tokens)
            start_position_char = passage_text.find(question_str)
            end_position_char = start_position_char + len(question_str) - 1
            span_start_position_word = passage_char_to_word_offset[start_position_char]
            span_end_position_word = passage_char_to_word_offset[end_position_char]
            # print(span_start_position_word, span_end_position_word)
            if span_start_position_word <= headword_orig_index <= span_end_position_word:
                continue
            ###########################################################
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = span_utils.get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            if final_text in ['?'] or final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    text_index=headword_orig_index,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", text_index=0, start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        # Convert summed logits into probabilities over the n-best list.
        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
        probs = span_utils._compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["text_index"] = entry.text_index
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        # predict_span_with_headword_dict = {}
        # predict_span_with_headword_dict['span'] = nbest_json[0]["text"]
        # predict_span_with_headword_dict['headword'] = nbest_json[0]["headword_text"]
        # all_predictions[example.qas_id] = predict_span_with_headword_dict
        headword_index = nbest_json[0]["text_index"]
        # all_nbest_json[example.qas_id] = nbest_json
    return headword_index, nbest_json
RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
def main(args=None):
if args is None:
args = model_utils.run_redundancy_span_get_local_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.do_train:
if not args.train_file:
raise ValueError("If `do_train` is True, then `train_file` must be specified.")
if args.do_predict:
if not args.predict_file:
raise ValueError("If `do_predict` is True, then `predict_file` must be specified.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory () already exists and is not empty.")
os.makedirs(args.output_dir, exist_ok=True)
tokenizer = BertTokenizer.from_pretrained(args.bert_model)
train_examples = None
num_train_steps = None
if args.do_train:
train_examples = read_many_examples(input_file=args.train_file, is_training=True)
num_train_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
model = BertForQuestionAnswering.from_pretrained(args.bert_model,
cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
# hack to remove pooler, which is not used
# thus it produce None grad that break apex
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],'weight_decay': 0.0} ]
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
global_step = 0
if args.do_train:
cached_train_features_file = args.train_file+'_{0}_{1}_{2}_{3}'.format(
args.bert_model, str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length))
train_features = None
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
except:
train_features = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=True)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
# logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
if n_gpu == 1:
batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask,
start_positions=start_positions, end_positions=end_positions)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * span_utils.warmup_linear(global_step/t_total, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Save a trained model
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
# Load a trained model that you have fine-tuned
model_state_dict = torch.load(output_model_file)
model = BertForQuestionAnswering.from_pretrained(args.bert_model, state_dict=model_state_dict)
model.to(device)
if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = read_many_examples(input_file=args.predict_file, is_training=False)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=False)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
model.eval()
all_results = []
# logger.info("Start evaluating")
for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
batch_start_logits, batch_end_logits = model(
input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
for i, example_index in enumerate(example_indices):
start_logits = batch_start_logits[i].detach().cpu().tolist()
end_logits = batch_end_logits[i].detach().cpu().tolist()
eval_feature = eval_features[example_index.item()]
unique_id = int(eval_feature.unique_id)
all_results.append(RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(args.output_dir, "predictions.json")
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
write_predictions(eval_examples, eval_features, all_results,
args.n_best_size, args.max_answer_length,
args.do_lower_case, output_prediction_file,
output_nbest_file, args.verbose_logging)
# Standard script entry point: run main() only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| 38,538 | 45.884428 | 141 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert/run_joint_three_models.py | import logging
import collections
import torch
import random
import numpy as np
import pickle
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm, trange
import json
import sys
import os
#------------------------------------------------
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
os.environ['CUDA_VISIBLE_DEVICES']='1'
#------------------------------------------------
from parsing.models.pytorch_pretrained_bert.tokenization import whitespace_tokenize, BertTokenizer
from parsing.models.pytorch_pretrained_bert.modeling import BertForSpanWithHeadwordWithLabel
from parsing.models.pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from parsing.models.pytorch_pretrained_bert.optimization import BertAdam
from parsing.models import model_utils
from parsing.models.fine_tuning_based_on_bert import span_utils
#------------------------------------------------
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
##################################################
class SpanWithHeadwordWithLabelExample(object):
    '''A single training/test example for the span prediction dataset.

    Holds a whitespace-tokenized sentence plus (for training) the gold
    answer span, its headword position, and the dependency label.

    Args:
        qas_id: unique identifier for the example.
        doc_tokens: list of whitespace-split word tokens of the sentence.
        orig_answer_text: gold answer span text (None at prediction time).
        orig_headword_text: gold headword text (None at prediction time).
        start_position: word index of the first span token (inclusive).
        end_position: word index of the last span token (inclusive).
        headword_position: word index of the headword, or -1 for "no headword".
        label: dependency label string (e.g. "nmod").
    '''
    def __init__(self, qas_id, doc_tokens, orig_answer_text=None, orig_headword_text=None,
                 start_position=None, end_position=None, headword_position=None, label=None):
        self.qas_id = qas_id
        # self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position
        self.orig_headword_text = orig_headword_text
        # Stored under the shorter name `head_position`; callers rely on it.
        self.head_position = headword_position
        self.label = label
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        s = ""
        s += "qas_id: %s" % (self.qas_id)
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        # BUGFIX: use explicit `is not None` checks. The original truthiness
        # tests silently dropped position 0, and `end_position` was gated on
        # `start_position` by a copy-paste mistake.
        if self.start_position is not None: s += ", start_position: %d" % (self.start_position)
        if self.end_position is not None: s += ", end_position: %d" % (self.end_position)
        if self.head_position is not None: s += ", head_position: %d" % (self.head_position)
        return s
class InputFeatures(object):
    """A single set of features of data (one sliding-window chunk of one example)."""

    def __init__(self, unique_id, example_index, doc_span_index, doc_tokens, tokens,
                 token_to_orig_map, token_is_max_context, input_ids, input_mask,
                 segment_ids, start_position=None, end_position=None,
                 headword_position=None, label_id=None):
        # Bookkeeping identifiers tying this chunk back to its source example.
        self.unique_id = unique_id
        self.example_index = example_index
        self.doc_span_index = doc_span_index
        # Token-level views of the chunk and their mapping back to words.
        self.doc_tokens = doc_tokens
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        # Model inputs (already padded to max_seq_length by the caller).
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        # Supervision targets; all None at prediction time.
        self.start_position = start_position
        self.end_position = end_position
        self.headword_position = headword_position
        self.label_id = label_id
##################################################
def read_many_examples(input_file, is_training):
    '''Read span/headword/label examples from a column-formatted file.

    Each input row holds four columns, e.g.
        "What religion was followed by the person ?" | "by the person" | "followed" | "pp"
    (paragraph text, answer span text, headword text, dependency label).

    Args:
        input_file: path passed to span_utils.read_cols_lines.
        is_training: when True, gold span/headword word positions are resolved
            and rows whose span cannot be recovered from the tokens are dropped.

    Returns:
        A list of SpanWithHeadwordWithLabelExample objects.
    '''
    lines_list = span_utils.read_cols_lines(input_file=input_file)
    examples = []
    for i in range(len(lines_list)):
        paragraph_text = lines_list[i][0]
        answer_text = lines_list[i][1]
        headword_text = lines_list[i][2]
        label = lines_list[i][3]
        # Whitespace-tokenize the paragraph and record, for every character,
        # the index of the word it belongs to (char offset -> word index).
        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        for c in paragraph_text:
            if span_utils.is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens)-1)
        if is_training:
            qas_id = 'train_'+str(i)
        else:
            qas_id = 'test_'+str(i)
        start_position = None
        end_position = None
        headword_position = None
        if is_training:
            # BUGFIX: the original check `len(answer_text) != 1` counted the
            # CHARACTERS of the answer string (a SQuAD copy-paste of
            # `len(qa["answers"]) != 1`), so any multi-character answer raised.
            # Require a non-empty answer instead.
            if not answer_text:
                raise ValueError("For training, each question should have exactly 1 answer.")
            orig_answer_text = answer_text
            orig_headword_text = headword_text
            answer_offset = paragraph_text.find(answer_text)  # answer_start
            if answer_offset == -1:
                # Span text not present in the paragraph: skip the row rather
                # than indexing char_to_word_offset with -1 (which silently
                # pointed at the last word).
                continue
            answer_length = len(orig_answer_text)
            start_position = char_to_word_offset[answer_offset]
            end_position = char_to_word_offset[answer_offset + answer_length - 1]
            # duplicate_word returns -1 when the headword is absent (null headword).
            headword_offset = span_utils.duplicate_word(paragraph_text=paragraph_text, span=answer_text, headword=headword_text)
            if headword_offset == -1:
                headword_position = -1
            else:
                headword_position = char_to_word_offset[headword_offset]
            # Only add answers where the text can be exactly recovered from the
            # document tokens. If this CAN'T happen it's likely due to weird
            # Unicode stuff, so we just skip the example. This means that for
            # training mode, not every row is guaranteed to be preserved.
            actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
            cleaned_answer_text = " ".join(whitespace_tokenize(orig_answer_text))
            if actual_text.find(cleaned_answer_text) == -1:
                continue
        else:
            headword_offset = span_utils.duplicate_word(paragraph_text=paragraph_text, span=answer_text, headword=headword_text)
            if headword_offset == -1:  # this means that headword == null
                headword_position = -1
            else:
                headword_position = char_to_word_offset[headword_offset]
            orig_answer_text = answer_text
            orig_headword_text = headword_text
        example = SpanWithHeadwordWithLabelExample(
            qas_id=qas_id,
            doc_tokens=doc_tokens,
            orig_answer_text=orig_answer_text,
            orig_headword_text=orig_headword_text,
            start_position=start_position,
            end_position=end_position,
            headword_position=headword_position,
            label=label)
        examples.append(example)
    return examples
def read_one_example(paragraph_text):
    """Wrap a single raw sentence as a prediction-time example.

    Splits `paragraph_text` on whitespace into word tokens and returns a
    one-element list holding a SpanWithHeadwordWithLabelExample with no gold
    annotations (the label is fixed to 'nmod' as a placeholder).
    """
    doc_tokens = []
    char_to_word_offset = []  # char offset -> word index (kept for parity; unused here)
    starting_new_word = True
    for ch in paragraph_text:
        if span_utils.is_whitespace(ch):
            starting_new_word = True
        else:
            if starting_new_word:
                doc_tokens.append(ch)
            else:
                doc_tokens[-1] += ch
            starting_new_word = False
        char_to_word_offset.append(len(doc_tokens) - 1)
    # e.g. doc_tokens == ['In', 'which', 'city', 'is', 'the', 'headquarter',
    # 'of', 'Air', 'China', '?']
    single_example = SpanWithHeadwordWithLabelExample(
        qas_id="test",
        doc_tokens=doc_tokens,
        orig_answer_text=None,
        start_position=None,
        end_position=None,
        orig_headword_text=None,
        headword_position=None,
        label='nmod')
    return [single_example]
##################################################
def convert_examples_to_features(examples, label_list, tokenizer, max_seq_length, doc_stride, max_query_length, is_training):
    """Loads a data file into a list of `InputBatch`s.

    Each example is WordPiece-tokenized and split into overlapping chunks of
    at most ``max_seq_length - 3`` sub-tokens (sliding window with stride
    ``doc_stride``). For training, gold word-level positions are re-mapped to
    sub-token positions, and chunks that do not contain the gold span are
    dropped.

    NOTE(review): ``max_query_length`` is accepted but never used — there is
    no query segment in this model (see the ``- 0`` below); presumably kept
    for signature compatibility with the SQuAD-style converter.
    """
    # Map each dependency label string to its integer class id.
    label_to_ids_map = {label : i for i, label in enumerate(label_list)}
    unique_id = 1000000000
    features = []
    for (example_index, example) in enumerate(examples):
        tok_to_orig_index = [] #token - orig word index
        orig_to_tok_index = [] #orig word - token index
        all_doc_tokens = [] #all tokens
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        tok_headword_position = None
        if is_training:
            # Translate gold word-level positions into sub-token positions.
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            # NOTE(review): head_position can be -1 (null headword), in which
            # case this indexes the LAST word's first sub-token — confirm that
            # is the intended encoding.
            tok_headword_position = orig_to_tok_index[example.head_position]
            (tok_start_position, tok_end_position) = span_utils._improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text)
        # The -3 accounts for [CLS], [SEP] and [SEP]
        # max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
        max_tokens_for_doc = max_seq_length - 0 - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        # pylint: disable=invalid-name
        _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(1)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                # A token appearing in several overlapping chunks is only
                # scored in the chunk where it has the most surrounding context.
                is_max_context = span_utils._check_is_max_context(doc_spans, doc_span_index, split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            start_position = None
            end_position = None
            headword_position = None
            if is_training:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                # NOTE(review): this compares WORD-level positions
                # (example.start_position) against SUB-TOKEN chunk offsets
                # (doc_span.start); the reference implementation compares
                # tok_start_position/tok_end_position here — verify.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                if (example.start_position < doc_start or example.end_position < doc_start or
                    example.start_position > doc_end or example.end_position > doc_end):
                    continue
                # doc_offset = len(query_tokens) + 2
                # +1 shifts past the leading [CLS] token.
                doc_offset = 1
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset
                headword_position = tok_headword_position - doc_start + doc_offset
            # print('#all doc tokens:\t', all_doc_tokens) #['in', 'which', 'city', 'is', 'the', 'head', '##qua', '##rter', 'of', 'air', 'chin', '##a', '?']
            # print('#tokens:\t', tokens)
            # print('#input_ids:\t', input_ids) #input_ids: [101, 170, 1830, 1665, 102, 1107, 1134, 1331, 1110, 1103, 1246, 13284, 27618, 1104, 1586, 5144, 1161, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            # print('#input_mask:\t', input_mask) #input_mask: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            # print('#segment_ids:\t', segment_ids) #segment_ids: [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            # print('#token_to_orig_map:\t', token_to_orig_map) #{5: 0, 6: 1, 7: 2, 8: 3, 9: 4, 10: 5, 11: 5, 12: 5, 13: 6, 14: 7, 15: 8, 16: 8, 17: 9}
            features.append(
                InputFeatures(
                    unique_id=unique_id,
                    example_index=example_index,
                    doc_span_index=doc_span_index,
                    doc_tokens=example.doc_tokens,
                    tokens=tokens,
                    token_to_orig_map=token_to_orig_map,
                    token_is_max_context=token_is_max_context,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    start_position=start_position,
                    end_position=end_position,
                    headword_position=headword_position,
                    label_id=label_to_ids_map[example.label]))
            unique_id += 1
    return features
##################################################
def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, verbose_logging):
    """Write final predictions to the json file.

    Combines the per-chunk logits in `all_results` into n-best
    (span, headword, label) candidates per example, ranks them by the sum of
    their logits, writes the top prediction per example to
    `output_prediction_file`, and an accuracy summary to `output_nbest_file`.
    """
    # Group chunk features by their source example and index raw results by
    # each feature's unique id.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    # pylint: disable=invalid-name
    _PrelimPrediction = collections.namedtuple(
        "PrelimPrediction", ["feature_index", "start_index", "start_logit", "end_index", "end_logit",
                             "headword_index", "headword_logit", "label_id", "label_logit"])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    eval_accuracy = 0
    instance_num = 0
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        correct_answer_text = example.orig_answer_text
        # correct_headword_text = example.orig_headword_text
        # correct_label = example.label
        correct_headword_index = example.head_position
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            # NOTE(review): after this loop correct_label_id holds only the
            # LAST feature's label id — confirm that is intended when an
            # example spans several chunks.
            correct_label_id = feature.label_id
            start_indexes = span_utils._get_best_indexes(result.start_logits, n_best_size)
            end_indexes = span_utils._get_best_indexes(result.end_logits, n_best_size)
            headword_indexs = span_utils._get_best_indexes(result.headword_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    for headword_index in headword_indexs: # headword index do not have overlap span
                        # Headwords inside the predicted span are skipped.
                        if start_index <= headword_index <= end_index:
                            continue
                        # The label prediction is per-chunk, independent of the
                        # chosen span/headword indices.
                        label_id = np.argmax(result.label_logits, axis=0)
                        label_logit = result.label_logits[label_id]
                        prelim_predictions.append(
                            _PrelimPrediction(
                                feature_index=feature_index,
                                start_index=start_index,
                                end_index=end_index,
                                start_logit=result.start_logits[start_index],
                                end_logit=result.end_logits[end_index],
                                headword_index=headword_index,
                                headword_logit=result.headword_logits[headword_index],
                                label_id=str(label_id),
                                label_logit=label_logit))
        # Rank candidates by the sum of all four logits.
        prelim_predictions = sorted(
            prelim_predictions, key=lambda x: (x.start_logit + x.end_logit + x.headword_logit + x.label_logit), reverse=True)
        # pylint: disable=invalid-name
        _NbestPrediction = collections.namedtuple(
            "NbestPrediction", ["text", "start_logit", "end_logit", "headword_text", "headword_logit", "label_id", "label_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            headword_token = feature.tokens[pred.headword_index: (pred.headword_index + 1)]
            headword_text = " ".join(headword_token)
            ######################################
            # Single-token spans and special-token headwords are rejected.
            if len(tok_tokens) == 1:
                continue
            if headword_text in ['[CLS]', '[SEP]']:
                continue
            # NOTE(review): this REPLACES the headword text with its original
            # WORD INDEX (the commented line above suggests the word text was
            # intended) — verify which one downstream consumers expect.
            if pred.headword_index in feature.token_to_orig_map.keys():
                # headword_text = example.doc_tokens[feature.token_to_orig_map[pred.headword_index]]
                headword_text = feature.token_to_orig_map[pred.headword_index]
                # print(feature.token_to_orig_map)
                # print(headword_text, pred.headword_index)
            ######################################
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = span_utils.get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            # NOTE(review): membership is tested on final_text alone, but keys
            # are stored as (final_text, headword_text) tuples below, so this
            # dedup never fires — confirm intended.
            if final_text in seen_predictions:
                continue
            seen_predictions[(final_text, headword_text)] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit,
                    headword_text=headword_text,
                    headword_logit=pred.headword_logit,
                    label_id=pred.label_id,
                    label_logit=pred.label_logit))
        # In very rare edge cases we could have no valid predictions.
        # So we just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(
                _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0, headword_text="empty",
                                 headword_logit=0.0, label_id="0", label_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit + entry.headword_logit + entry.label_logit)
        probs = span_utils._compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            output["headword_text"] = entry.headword_text
            output["headword_logit"] = entry.headword_logit
            output["label_id"] = entry.label_id
            output["label_logit"] = entry.label_logit
            # NOTE(review): entry.label_id is a str while correct_label_id is
            # the feature's int label id, and headword_text may be either a
            # token string or a word index — these equality checks look
            # type-mismatched and may always yield accuracy 0; verify.
            if entry.text == correct_answer_text and entry.headword_text == correct_headword_index and entry.label_id == correct_label_id:
                output["accuracy"] = 1
            else:
                output["accuracy"] = 0
                # Debug output: predicted vs. gold triple.
                print (entry.text, entry.headword_text, entry.label_id)
                print ('\t\t', correct_answer_text, correct_headword_index, correct_label_id)
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        predict_span_with_headword_dict = {}
        predict_span_with_headword_dict['span'] = nbest_json[0]["text"]
        predict_span_with_headword_dict['headword'] = nbest_json[0]["headword_text"]
        predict_span_with_headword_dict['label_id'] = nbest_json[0]["label_id"]
        predict_span_with_headword_dict['accuracy'] = nbest_json[0]["accuracy"]
        # predict_span_with_headword_dict['label'] = nbest_json[0]["label_logit"]
        # predict_span_with_headword_dict['label_argmax'] = np.argmax(nbest_json[0]["label_logit"], axis=1)
        all_predictions[example.qas_id] = predict_span_with_headword_dict
        all_nbest_json[example.qas_id] = nbest_json
        eval_accuracy += nbest_json[0]["accuracy"]
        instance_num += 1
    # Overall top-1 accuracy across all examples.
    result = eval_accuracy / instance_num
    print("#result:\t", result)
    result_json = collections.OrderedDict()
    result_json['result'] = result
    result_json['eval_accuracy'] = eval_accuracy
    result_json['instance_num'] = instance_num
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")
    with open(output_nbest_file, "w") as writer:
        # writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
        writer.write(json.dumps(result_json, indent=4) + "\n")
def write_span_headwords_with_nbest(all_examples, all_features, all_results, n_best_size,
                                    max_answer_length, do_lower_case, verbose_logging):
    '''Decode the best (span, headword, label) prediction plus the n-best list.

    Same decoding procedure as write_predictions, but returns the results
    instead of writing JSON files. Returns (span, headword, label_id,
    nbest_json) for the LAST example in all_examples — callers are expected to
    pass a single example (see read_one_example).
    '''
    span = None
    headword = None
    label_id = None
    nbest_json = None
    # Group chunk features per example; index raw results by unique id.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    # pylint: disable=invalid-name
    _PrelimPrediction = collections.namedtuple(
        "PrelimPrediction", ["feature_index", "start_index", "start_logit", "end_index", "end_logit",
                             "headword_index", "headword_logit", "label_id", "label_logit"])
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = span_utils._get_best_indexes(result.start_logits, n_best_size)
            end_indexes = span_utils._get_best_indexes(result.end_logits, n_best_size)
            headword_indexs = span_utils._get_best_indexes(result.headword_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    for headword_index in headword_indexs:
                        # headword index must be middle between start_index and end_index
                        # if headword_index < start_index or headword_index > end_index:
                        #     continue
                        # headword index do not have overlap span
                        if start_index <= headword_index <= end_index:
                            continue
                        # Label prediction is per-chunk, independent of indices.
                        label_id = np.argmax(result.label_logits, axis=0)
                        label_logit = result.label_logits[label_id]
                        prelim_predictions.append(
                            _PrelimPrediction(
                                feature_index=feature_index,
                                start_index=start_index,
                                end_index=end_index,
                                start_logit=result.start_logits[start_index],
                                end_logit=result.end_logits[end_index],
                                headword_index=headword_index,
                                headword_logit=result.headword_logits[headword_index],
                                label_id=str(label_id),
                                label_logit=label_logit))
        # Rank candidates by the sum of all four logits.
        prelim_predictions = sorted(prelim_predictions,
                                    key=lambda x: (x.start_logit + x.end_logit + x.headword_logit + x.label_logit), reverse=True)
        # pylint: disable=invalid-name
        _NbestPrediction = collections.namedtuple(
            "NbestPrediction", ["text", "start_logit", "end_logit", "headword_text", "headword_logit", "label_id", "label_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            # print ('doc_tokensdoc_tokensdoc_tokens:\t', example.doc_tokens)
            tok_text = " ".join(tok_tokens)
            headword_text = " ".join(feature.tokens[pred.headword_index: (pred.headword_index + 1)])
            # Single-token spans and special-token headwords are rejected.
            if len(tok_tokens) == 1:
                continue
            if headword_text in ['[CLS]', '[SEP]']:
                continue
            # NOTE(review): this REPLACES the headword text with its original
            # WORD INDEX (the commented line suggests the word text was
            # intended) — verify which one downstream consumers expect.
            if pred.headword_index in feature.token_to_orig_map.keys():
                # headword_text = example.doc_tokens[feature.token_to_orig_map[pred.headword_index]]
                headword_text = feature.token_to_orig_map[pred.headword_index]
                # print(feature.token_to_orig_map)
                # print(headword_text, pred.headword_index)
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = span_utils.get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            # NOTE(review): membership is tested on final_text alone, but keys
            # are stored as (final_text, headword_text) tuples below, so this
            # dedup never fires — confirm intended.
            if final_text in seen_predictions:
                continue
            seen_predictions[(final_text, headword_text)] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit,
                    headword_text=headword_text,
                    headword_logit=pred.headword_logit,
                    label_id=pred.label_id,
                    label_logit=pred.label_logit))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(
                _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0, headword_text="empty",
                                 headword_logit=0.0, label_id="0", label_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit + entry.headword_logit + entry.label_logit)
        probs = span_utils._compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            output["headword_text"] = entry.headword_text
            output["headword_logit"] = entry.headword_logit
            output["label_id"] = entry.label_id
            output["label_logit"] = entry.label_logit
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        # predict_span_with_headword_dict = {}
        # predict_span_with_headword_dict['span'] = nbest_json[0]["text"]
        # predict_span_with_headword_dict['headword'] = nbest_json[0]["headword_text"]
        # all_predictions[example.qas_id] = predict_span_with_headword_dict
        # Keep the top-1 decoding; overwritten on each loop iteration, so only
        # the last example's result survives.
        span = nbest_json[0]["text"]
        headword = nbest_json[0]["headword_text"]
        label_id = nbest_json[0]["label_id"]
        # all_nbest_json[example.qas_id] = nbest_json
    return span, headword, label_id, nbest_json
# Raw per-feature model outputs: span start/end logits, headword logits and
# dependency-label logits, keyed back to the feature via unique_id.
RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits", "headword_logits", "label_logits"])
#def main():
# args = model_utils.run_joint_three_models_get_local_args()
def main(args=None):
    """Train and/or evaluate the joint span + headword + label BERT model.

    Args:
        args: argparse namespace; when None it is loaded via
            model_utils.run_joint_three_models_get_local_args().

    Side effects: writes the fine-tuned model and prediction JSON files into
    args.output_dir; prints debug output to stdout.
    """
    if args is None:
        args = model_utils.run_joint_three_models_get_local_args()
    print('#start:\t', args.learning_rate, args.train_batch_size, args.num_train_epochs)
    # --- Device / distributed setup ---
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(args.gradient_accumulation_steps))
    # The requested batch size is split across gradient-accumulation steps.
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    # --- Argument validation ---
    if not args.do_train and not args.do_predict:
        raise ValueError("At least one of `do_train` or `do_predict` must be True.")
    if args.do_train:
        if not args.train_file:
            raise ValueError("If `do_train` is True, then `train_file` must be specified.")
    if args.do_predict:
        if not args.predict_file:
            raise ValueError("If `do_predict` is True, then `predict_file` must be specified.")
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory () already exists and is not empty.")
    os.makedirs(args.output_dir, exist_ok=True)
    #----------------------------------------------
    # Dependency-relation label inventory for the classification head.
    labels_list = ["nmod", "conj", "acl:cl", "acl", "nmod:poss", "advcl", "xcomp"]
    num_labels = len(labels_list)
    #----------------------------------------------
    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = read_many_examples(input_file=args.train_file, is_training=True)
        num_train_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
    # Prepare model
    model = BertForSpanWithHeadwordWithLabel.from_pretrained(args.bert_model,
                cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank), num_labels=num_labels)
    print(PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    # hack to remove pooler, which is not used
    # thus it produce None grad that break apex
    param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
    # Bias and LayerNorm parameters are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)
    global_step = 0
    if args.do_train:
        # Feature cache keyed by model name and tokenization parameters.
        cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format(
            args.bert_model, str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length))
        train_features = None
        try:
            with open(cached_train_features_file, "rb") as reader:
                train_features = pickle.load(reader)
        # NOTE(review): bare except treats any error as a cache miss and
        # rebuilds the features; it also hides genuine failures.
        except:
            train_features = convert_examples_to_features(
                examples=train_examples,
                label_list=labels_list,
                tokenizer=tokenizer,
                max_seq_length=args.max_seq_length,
                doc_stride=args.doc_stride,
                max_query_length=args.max_query_length,
                is_training=True)
            if args.local_rank == -1 or torch.distributed.get_rank() == 0:
                # logger.info("  Saving train features into cached file %s", cached_train_features_file)
                with open(cached_train_features_file, "wb") as writer:
                    pickle.dump(train_features, writer)
        # logger.info("***** Running training *****")
        # logger.info("  Num orig examples = %d", len(train_examples))
        # logger.info("  Num split examples = %d", len(train_features))
        # logger.info("  Batch size = %d", args.train_batch_size)
        # logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
        all_headword_positions = torch.tensor([f.headword_position for f in train_features], dtype=torch.long)
        all_labels = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions,
                                   all_end_positions, all_headword_positions, all_labels)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                if n_gpu == 1:
                    batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self
                input_ids, input_mask, segment_ids, start_positions, end_positions, headword_positions, label_ids = batch
                # NOTE(review): debug prints left in the training loop.
                print('headword#####', len(headword_positions), headword_positions)
                print('label_ids#####', len(label_ids), label_ids)
                loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask,
                             start_positions=start_positions, end_positions=end_positions,
                             headword_positions=headword_positions, label_ids=label_ids)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * span_utils.warmup_linear(global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
        # Save a trained model
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        torch.save(model_to_save.state_dict(), output_model_file)
        # Load a trained model that you have fine-tuned
        model_state_dict = torch.load(output_model_file)
        model = BertForSpanWithHeadwordWithLabel.from_pretrained(
            args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
        model.to(device)
    if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = read_many_examples(input_file=args.predict_file, is_training=False)
        eval_features = convert_examples_to_features(
            examples=eval_examples,
            label_list=labels_list,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=False)
        # logger.info("***** Running predictions *****")
        # logger.info("  Num orig examples = %d", len(eval_examples))
        # logger.info("  Num split examples = %d", len(eval_features))
        # logger.info("  Batch size = %d", args.predict_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        #all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
        model.eval()
        all_results = []
        # logger.info("Start evaluating")
        for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
            # if len(all_results) % 1000 == 0: logger.info("Processing example: %d" % (len(all_results)))
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            #label_ids = label_ids.to(device)
            with torch.no_grad():
                batch_start_logits, batch_end_logits, batch_headword_logits, batch_label_logits \
                    = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
            for i, example_index in enumerate(example_indices):
                start_logits = batch_start_logits[i].detach().cpu().tolist()
                end_logits = batch_end_logits[i].detach().cpu().tolist()
                headword_logits = batch_headword_logits[i].detach().cpu().tolist()
                label_logits = batch_label_logits[i].detach().cpu().tolist()
                #label_logits_outputs = np.argmax(label_logits, axis=1)
                #label_logits_outputs[0]
                eval_feature = eval_features[example_index.item()]
                unique_id = int(eval_feature.unique_id)
                all_results.append(RawResult(unique_id=unique_id,
                                             start_logits=start_logits,
                                             end_logits=end_logits,
                                             headword_logits=headword_logits,
                                             label_logits=label_logits))
        output_prediction_file = os.path.join(args.output_dir, "predictions.json")
        output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
        write_predictions(eval_examples, eval_features, all_results,
                          args.n_best_size, args.max_answer_length,
                          args.do_lower_case, output_prediction_file,
                          output_nbest_file, args.verbose_logging)
if __name__ == "__main__":
    main()
| 46,532 | 48.293432 | 265 | py |
SPARQA | SPARQA-master/code/parsing/models/fine_tuning_based_on_bert/span_utils.py | import collections
import mmap
from parsing.models.pytorch_pretrained_bert.tokenization import BasicTokenizer
import re
import math
def warmup_linear(x, warmup=0.002):
    """Linear warmup followed by linear decay.

    Returns x/warmup during the warmup phase, and 1 - x afterwards.
    """
    return x / warmup if x < warmup else 1.0 - x
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: span text assembled from WordPiece tokens.
        orig_text: the corresponding span of the original document text.
        do_lower_case: whether the BasicTokenizer should lower-case.
        verbose_logging: unused here (logging calls are commented out).

    Returns:
        The best-aligned substring of orig_text, or orig_text itself when
        the alignment heuristic fails.
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens.
    # So now `orig_text` contains the span of our original text
    # corresponding to the span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.
    def _strip_spaces(text):
        # Returns (text without spaces, map from stripped index -> original index).
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)
    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        # if verbose_logging:
        #     logger.info(
        #         "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        # if verbose_logging:
        #     logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text`
    # using the character-to-character alignment.
    # Invert the tokenized map: original index -> stripped index.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        # if verbose_logging:
        #     logger.info("Couldn't map start position")
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        # if verbose_logging:
        #     logger.info("Couldn't map end position")
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based.
# We first project them to whitespace-tokenized words.
# But then after WordPiece tokenization, we can often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).".
# However after tokenization, our tokens will be "( 1895 - 1943 ) .".
# So we can match the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def is_whitespace(c):
    """Return True for characters treated as whitespace during tokenization.

    Covers space, tab, CR, LF and U+202F (narrow no-break space).
    """
    return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F
def read_cols_lines(input_file):
    """Read a UTF-8 text file of tab-separated columns.

    Each line is stripped and split on tabs, e.g.
    "who was the american in space ?\tin space" -> two columns.

    Args:
        input_file: path to the input text file.

    Returns:
        A list with one list of column strings per input line.
    """
    # Fix: the previous implementation mmap-ed the file, which raises
    # ValueError on an empty file and required manual decode()/close();
    # plain buffered iteration yields the same rows and handles empty files.
    lines_list = []
    with open(input_file, 'r', encoding='utf-8') as reader:
        for line in reader:
            lines_list.append(line.strip().split('\t'))
    return lines_list
def duplicate_word(paragraph_text, span, headword):
    """Locate the occurrence of `headword` closest to the first occurrence of `span`.

    Used when the headword string occurs several times in the sentence and we
    need the instance nearest the predicted span, e.g.
    paragraph_text = "What is the Andy Warhol 's job and film of Vincenzo Proietti ?",
    span = "of Vincenzo Proietti", headword = "job".

    Args:
        paragraph_text: the full sentence text.
        span: the predicted span text (matched literally).
        headword: the headword text (matched literally).

    Returns:
        Character offset of the closest headword occurrence (earliest wins
        ties), or -1 when `span` does not occur in `paragraph_text`.
    """
    # Fix: re.escape makes the search literal — without it, inputs containing
    # regex metacharacters (e.g. "(", "+", "[") raised re.error or matched the
    # wrong text. The stray debug print of span offsets was also removed.
    headword_starts = [m.start() for m in re.finditer(re.escape(headword), paragraph_text)]
    span_starts = [m.start() for m in re.finditer(re.escape(span), paragraph_text)]
    headword_index = -1
    if span_starts:
        span_index = span_starts[0]
        min_distance = len(paragraph_text)
        for candidate in headword_starts:
            distance = abs(candidate - span_index)
            if min_distance > distance:
                min_distance = distance
                headword_index = candidate
    return headword_index
| 9,725 | 40.211864 | 109 | py |
SPARQA | SPARQA-master/code/parsing/models/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
def warmup_cosine(x, warmup=0.002):
    """Cosine learning-rate schedule with linear warmup.

    Args:
        x: training progress, expected in [0, 1] (fraction of total steps).
        warmup: fraction of total steps used for the linear warmup.

    Returns:
        The learning-rate multiplier.
    """
    if x < warmup:
        return x/warmup
    # Bug fix: x is a plain Python float, so use math.cos — the original
    # called torch.cos, which requires a Tensor and raised a TypeError here.
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
    """Linear warmup to a multiplier of 1.0, then constant thereafter."""
    return x / warmup if x < warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Linearly increase the multiplier during warmup, then decay it linearly."""
    in_warmup = x < warmup
    return x / warmup if in_warmup else 1.0 - x
# Mapping from schedule name (the BertAdam `schedule` argument) to the
# corresponding warmup function defined above.
SCHEDULES = {
    'warmup_cosine':warmup_cosine,
    'warmup_constant':warmup_constant,
    'warmup_linear':warmup_linear,
}
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.
    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0):
        # Validate hyper-parameters before handing them to the base Optimizer.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the current (schedule-adjusted) learning rate per parameter.

        Returns [0] if step() has not been called yet (no state exists).
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']
                # Add grad clipping
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                # NOTE(review): uses the legacy add_(scalar, tensor) /
                # addcmul_(scalar, t1, t2) overloads, removed in newer
                # PyTorch versions — confirm the pinned torch version.
                next_m.mul_(beta1).add_(1 - beta1, grad)
                next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                update = next_m / (next_v.sqrt() + group['e'])
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                state['step'] += 1
                # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
                # No bias correction
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
        return loss
| 6,785 | 41.149068 | 116 | py |
SPARQA | SPARQA-master/code/parsing/models/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
    """Command-line entry point: convert a TensorFlow BERT checkpoint to PyTorch.

    Expects exactly three positional arguments:
    TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT.
    """
    import sys
    try:
        # Deferred import: the converter pulls in TensorFlow, which is only
        # needed for this command-line usage.
        from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
    except ModuleNotFoundError:
        print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
              "In that case, it requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    if len(sys.argv) != 5:
        # pylint: disable=line-too-long
        print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
    else:
        # Pop the three paths off argv in reverse order.
        PYTORCH_DUMP_OUTPUT = sys.argv.pop()
        TF_CONFIG = sys.argv.pop()
        TF_CHECKPOINT = sys.argv.pop()
        convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
if __name__ == '__main__':
    main()
| 932 | 39.565217 | 137 | py |
SPARQA | SPARQA-master/code/parsing/models/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path
from parsing.parsing_args import bert_args
logger = logging.getLogger(__name__)
# PRETRAINED_MODEL_ARCHIVE_MAP = {
# 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
# 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
# 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
# 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
# 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
# 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
# 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
# }
# Map from pre-trained model shortcut name to a local archive path; the
# upstream S3 URLs (see the commented block above) are replaced by paths
# configured in parsing.parsing_args.bert_args.
PRETRAINED_MODEL_ARCHIVE_MAP = {
    # 'bert-base-uncased': "../../resource/bert-base-uncased.tar.gz",
    'bert-base-uncased': bert_args.bert_base_uncased_model,
    'bert-base-cased': bert_args.bert_base_cased_model
}
# File names expected inside a pre-trained model archive.
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
    """Gaussian Error Linear Unit activation (exact, erf-based form).

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    scaled = x / math.sqrt(2.0)
    return x * 0.5 * (1.0 + torch.erf(scaled))
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
# String-to-callable lookup used when BertConfig.hidden_act is given by name.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`,
                or the path of a JSON config file to load every field from.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Load every configuration field verbatim from a JSON file.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            # Fix: the two string literals previously concatenated without a
            # separating space ("(int)or the path ...").
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")
    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Fix: use `cls` instead of the hard-coded class name so subclasses
        # construct instances of themselves.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config
    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))
    def __repr__(self):
        return str(self.to_json_string())
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output
    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
# Prefer apex's fused LayerNorm kernel when apex is installed; otherwise fall
# back to a pure-PyTorch implementation below.
try:
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps
        def forward(self, x):
            # Normalize over the last dimension, then apply the learned
            # elementwise affine transform (weight, bias).
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
    """Sum word, position and token-type embeddings, then LayerNorm + dropout."""
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased so that TensorFlow checkpoint
        # variable names still map onto this module when loading.
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        # Default to a single segment (all zeros) when no segment ids given.
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        seq_len = input_ids.size(1)
        position_ids = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        combined = (self.word_embeddings(input_ids)
                    + self.position_embeddings(position_ids)
                    + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(combined))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (no output projection)."""
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # [batch, seq, hidden] -> [batch, heads, seq, head_size]
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Scaled raw attention scores with the additive mask applied
        # (mask is 0 where attended, large-negative where masked).
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        # Dropout on the attention probabilities drops whole tokens to attend
        # to, as in the original Transformer paper.
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class BertSelfOutput(nn.Module):
    """Project the attention output, then residual add-&-norm."""
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Self-attention followed by its output (projection + add-&-norm) layer."""
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with a configurable activation."""
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Project the feed-forward expansion back down, then residual add-&-norm."""
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class BertLayer(nn.Module):
    """One transformer encoder block: attention, feed-forward, output."""
    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attn_out = self.attention(hidden_states, attention_mask)
        ff_out = self.intermediate(attn_out)
        return self.output(ff_out, attn_out)
class BertEncoder(nn.Module):
    """Stack of `num_hidden_layers` identical BertLayer blocks."""
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        # One template layer is deep-copied per hidden layer.
        template = BertLayer(config)
        self.layer = nn.ModuleList(
            [copy.deepcopy(template) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        # When only the final layer is wanted, the list holds just that one.
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class BertPooler(nn.Module):
    """Pool the sequence by transforming the first ([CLS]) token's hidden state."""
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Only the hidden state of the first token feeds the pooled output.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder."""
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # hidden_act may be a name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """Masked-LM decoder tied to the input embedding matrix, plus a free bias."""
    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        vocab_size = bert_model_embedding_weights.size(0)
        hidden = bert_model_embedding_weights.size(1)
        # The decoder's weights are shared with the input embeddings; only the
        # per-token output bias is a new parameter.
        self.decoder = nn.Linear(hidden, vocab_size, bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states)) + self.bias
class BertOnlyMLMHead(nn.Module):
    """Masked-LM head only (no next-sentence head)."""
    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: pooled output -> binary logits."""
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        # Two-way classification: is-next vs. random sentence.
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Both pre-training heads: masked-LM scores and 2-way NSP logits."""
    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class PreTrainedBertModel(nn.Module):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedBertModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights of one submodule (used via `self.apply`).
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            # LayerNorm must start as an identity transform (weight=1, bias=0),
            # matching the TF BERT reference. Drawing both from a normal
            # distribution (the previous behavior) randomly scales and shifts
            # every feature at the start of training.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-base-multilingual`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        Returns:
            The loaded model instance, or None when the archive cannot be resolved.
        """
        if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
        else:
            archive_file = pretrained_model_name
        # Redirect to the cache, downloading if necessary.
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except FileNotFoundError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    archive_file))
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file):
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            # NOTE(review): extractall on an untrusted archive can write outside
            # the target directory (path traversal); archives here are assumed
            # to come from the trusted model hub or the local user.
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        logger.info("loading configuration file {}".format(config_file))
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            state_dict = torch.load(weights_path)
        # Map TF-era parameter names (gamma/beta) onto PyTorch names (weight/bias).
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load parameters, mirroring nn.Module.load_state_dict
            # but collecting missing/unexpected keys for logging instead of
            # raising.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # Bare BertModel checkpoints carry no 'bert.' prefix; prepend it when
        # loading into a task head that wraps the encoder under `self.bert`.
        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        return model
class BertModel(PreTrainedBertModel):
    """The bare BERT transformer: summed token/position/segment embeddings
    followed by a stack of identical self-attention blocks and a pooler.

    Params:
        config: a BertConfig class instance describing the architecture.

    Inputs:
        `input_ids`: torch.LongTensor [batch_size, sequence_length] of
            WordPiece token indices.
        `token_type_ids`: optional torch.LongTensor of the same shape with
            segment indices in [0, 1] (sentence A / sentence B); defaults to
            all zeros.
        `attention_mask`: optional torch.LongTensor of the same shape with 1
            for real tokens and 0 for padding; defaults to all ones.
        `output_all_encoded_layers`: when True (default), every layer's hidden
            states are returned; otherwise only the last layer's.

    Outputs: tuple (encoded_layers, pooled_output)
        `encoded_layers`: a list of [batch_size, sequence_length, hidden_size]
            tensors, one per layer, when `output_all_encoded_layers=True`;
            just the final layer's tensor otherwise.
        `pooled_output`: [batch_size, hidden_size] tensor produced by the
            pooler from the first token's final hidden state.

    Example usage:
    ```python
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # Build a broadcastable additive mask of shape [batch, 1, 1, seq]:
        # 0.0 at positions to attend, -10000.0 at masked positions. Added to
        # the raw scores before softmax, this effectively removes masked
        # positions. Simpler than causal masking: only padding is hidden.
        ext_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        ext_mask = ext_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        ext_mask = (1.0 - ext_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      ext_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        pooled_output = self.pooler(encoded_layers[-1])
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
    """BERT with both pre-training heads: masked language modeling and
    next-sentence classification.

    Params:
        config: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: torch.LongTensor [batch_size, sequence_length] of token indices.
        `token_type_ids`: optional torch.LongTensor of the same shape, segment
            indices in [0, 1] (sentence A / sentence B).
        `attention_mask`: optional torch.LongTensor of the same shape, 1 for
            real tokens and 0 for padding.
        `masked_lm_labels`: optional torch.LongTensor [batch_size, sequence_length]
            with indices in [-1, 0, ..., vocab_size]; positions labeled -1 are
            ignored by the loss.
        `next_sentence_label`: optional torch.LongTensor [batch_size] with
            indices in [0, 1]; 0 => continuation, 1 => random sentence.

    Outputs:
        When both label tensors are provided: the scalar total loss (masked-LM
        loss + next-sentence loss). Otherwise: a tuple of the masked-LM logits
        [batch_size, sequence_length, vocab_size] and the next-sentence logits
        [batch_size, 2].

    Example usage:
    ```python
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForPreTraining(config)
    masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        # The MLM decoder ties its weights to the input embedding matrix.
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
        sequence_output, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        if masked_lm_labels is None or next_sentence_label is None:
            return prediction_scores, seq_relationship_score
        loss_fct = CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
        return masked_lm_loss + next_sentence_loss
class BertForMaskedLM(PreTrainedBertModel):
    """BERT model with the masked language modeling head.
    This module comprises the BERT model followed by the masked language modeling head.
    Params:
        config: a BertConfig class instance with the configuration to build a new model.
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
    Outputs:
        if `masked_lm_labels` is not `None`:
            Outputs the masked language modeling loss.
        if `masked_lm_labels` is `None`:
            Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForMaskedLM(config)
    masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)
        self.bert = BertModel(config)
        # The MLM decoder ties its weights to the input embedding matrix.
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                       output_all_encoded_layers=False)
        prediction_scores = self.cls(sequence_output)
        if masked_lm_labels is not None:
            # Positions labeled -1 are ignored; loss only over real labels.
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            return masked_lm_loss
        else:
            return prediction_scores
class BertForNextSentencePrediction(PreTrainedBertModel):
    """BERT model with next sentence prediction head.
    This module comprises the BERT model followed by the next sentence classification head.
    Params:
        config: a BertConfig class instance with the configuration to build a new model.
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.
    Outputs:
        if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
        if `next_sentence_label` is `None`:
            Outputs the next sentence classification logits of shape [batch_size, 2].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForNextSentencePrediction(config)
    seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
        # Only the pooled [CLS] representation feeds the NSP classifier.
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                     output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            return next_sentence_loss
        else:
            return seq_relationship_score
class BertForMultipleChoice(PreTrainedBertModel):
    """BERT model for multiple choice tasks.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.
    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_choices`: the number of classes for the classifier. Default = 2.
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices-1].
    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    num_choices = 2
    model = BertForMultipleChoice(config, num_choices)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_choices=2):
        super(BertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Each choice gets a single score; scores are regrouped per example.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Fold the choices dimension into the batch so BERT sees 2D inputs:
        # [batch, num_choices, seq] -> [batch * num_choices, seq].
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        # token_type_ids / attention_mask are documented as optional; guard
        # None instead of crashing on `.view()` — BertModel supplies defaults.
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, self.num_choices)
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            return loss
        else:
            return reshaped_logits
##################################################
class BertForSequenceClassification(PreTrainedBertModel):
    """Sentence-level classifier: BERT pooled [CLS] output + dropout + linear layer.

    Args:
        config: BertConfig instance used to construct the underlying BertModel.
        num_labels: number of target classes (default 2).

    forward() returns the cross-entropy loss when `labels` is provided,
    otherwise the classification logits of shape [batch_size, num_labels].
    """

    def __init__(self, config, num_labels=2):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # The pooled output is the transformed hidden state of the [CLS] token.
        _, cls_repr = self.bert(input_ids, token_type_ids, attention_mask,
                                output_all_encoded_layers=False)
        scores = self.classifier(self.dropout(cls_repr))
        if labels is None:
            return scores
        loss_fct = CrossEntropyLoss()
        return loss_fct(scores.view(-1, self.num_labels), labels.view(-1))
class BertForTokenClassification(PreTrainedBertModel):
    """Per-token classifier: BERT sequence output + dropout + shared linear layer.

    Args:
        config: BertConfig instance used to construct the underlying BertModel.
        num_labels: number of tag classes (default 2).

    forward() returns the cross-entropy loss over every token position when
    `labels` is provided, otherwise logits of shape
    [batch_size, sequence_length, num_labels].
    """

    def __init__(self, config, num_labels=2):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Hidden state of every token from the last encoder layer.
        token_states, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                    output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(token_states))
        if labels is None:
            return logits
        # Flatten batch and sequence dimensions for the token-level loss.
        loss_fct = CrossEntropyLoss()
        return loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
class BertForQuestionAnswering(PreTrainedBertModel):
    """Span-extraction QA head: one linear layer over the sequence output
    produces start and end logits per token.

    Args:
        config: BertConfig instance used to construct the underlying BertModel.

    forward() behavior:
        * training (both `start_positions` and `end_positions` given):
          returns the averaged start/end cross-entropy loss;
        * inference: returns (start_logits, end_logits), each of shape
          [batch_size, sequence_length].
    Positions outside the sequence are clamped onto an ignored index so
    they do not contribute to the loss.
    """

    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        # Mirroring the original TF implementation: no dropout before the
        # span classifier (two outputs: start score, end score).
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None,
                attention_mask=None, start_positions=None, end_positions=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                       output_all_encoded_layers=False)
        logits = self.qa_outputs(sequence_output)        # [batch, seq_len, 2]
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)          # [batch, seq_len]
        end_logits = end_logits.squeeze(-1)              # [batch, seq_len]

        if start_positions is None or end_positions is None:
            # Inference path.
            return start_logits, end_logits

        # Training path. Multi-GPU gather may add an extra dimension.
        if start_positions.dim() > 1:
            start_positions = start_positions.squeeze(-1)
        if end_positions.dim() > 1:
            end_positions = end_positions.squeeze(-1)
        # Gold positions outside the model input are clamped onto an index
        # the loss function is told to ignore.
        ignored_index = start_logits.size(1)
        start_positions.clamp_(0, ignored_index)
        end_positions.clamp_(0, ignored_index)
        loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
        start_loss = loss_fct(start_logits, start_positions)
        end_loss = loss_fct(end_logits, end_positions)
        return (start_loss + end_loss) / 2
class BertForSpanWithHeadwordWithLabel(PreTrainedBertModel):
    """Joint head over BERT: token-level start/end/headword span logits plus a
    sequence-level label from the pooled [CLS] output.

    Args:
        config: BertConfig instance used to construct the underlying BertModel.
        num_labels: number of sequence-level label classes (default 2).

    forward() behavior:
        * training (start/end/headword positions and `label_ids` all given):
          returns the average of the four cross-entropy losses;
        * inference: returns (start_logits, end_logits, headword_logits,
          labels_logits).
    """

    def __init__(self, config, num_labels=2):
        super(BertForSpanWithHeadwordWithLabel, self).__init__(config)
        self.bert = BertModel(config)
        # Token classifier: three outputs per token (start, end, headword).
        # As in the SQuAD head, no dropout before the span classifier.
        self.qa_outputs = nn.Linear(config.hidden_size, 3)
        self.apply(self.init_bert_weights)
        # Sequence-level label classifier over the pooled [CLS] output.
        self.num_labels = num_labels
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.label_classifier = nn.Linear(config.hidden_size, num_labels)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
                end_positions=None, headword_positions=None, label_ids=None):
        sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                                   output_all_encoded_layers=False)
        # Token-level logits: [batch, seq_len, 3] -> three [batch, seq_len] heads.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits, headword_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        headword_logits = headword_logits.squeeze(-1)
        # Sequence-level label logits from the pooled [CLS] representation.
        pooled_output = self.dropout(pooled_output)
        labels_logits = self.label_classifier(pooled_output)
        # Fix: the training gate previously tested `labels_logits is not None`,
        # which is always true, so a call without `label_ids` crashed below.
        if (start_positions is not None and end_positions is not None
                and headword_positions is not None and label_ids is not None):
            # Training. Multi-GPU gather may add an extra dimension.
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            if len(headword_positions.size()) > 1:
                # Fix: previously squeezed end_positions into headword_positions
                # (copy-paste bug), silently replacing the headword targets.
                headword_positions = headword_positions.squeeze(-1)
            # Positions outside the model input are clamped onto an ignored index.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            headword_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            headword_loss = loss_fct(headword_logits, headword_positions)
            # NOTE(review): the label loss reuses ignore_index=seq_len, matching
            # the original code; harmless as long as num_labels < seq_len.
            labels_loss = loss_fct(labels_logits.view(-1, self.num_labels), label_ids.view(-1))
            total_loss = (start_loss + end_loss + headword_loss + labels_loss) / 4
            return total_loss
        else:  # inference
            return start_logits, end_logits, headword_logits, labels_logits
##################################################
# class BertForHeadword(PreTrainedBertModel):
#
# def __init__(self, config):
# super(BertForHeadword, self).__init__(config)
# self.bert = BertModel(config)
# # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# # self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.qa_outputs = nn.Linear(config.hidden_size, 1)
# self.apply(self.init_bert_weights)
#
# def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
# sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
# logits = self.qa_outputs(sequence_output)
# headword_logits = logits.split(1, dim=-1)
# headword_logits = headword_logits.squeeze(-1)
#
# if start_positions is not None :
# # training
# # If we are on multi-GPU, split add a dimension
# if len(start_positions.size()) > 1:
# start_positions = start_positions.squeeze(-1)
# # sometimes the start/end positions are outside our model inputs, we ignore these terms
# ignored_index = headword_logits.size(1)
# start_positions.clamp_(0, ignored_index)
# loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
# headword_loss = loss_fct(headword_logits, start_positions)
# return headword_loss
# else:
# # testing
# return headword_logits
# class BertForSpanWithHeadword(PreTrainedBertModel):
# def __init__(self, config):
# super(BertForSpanWithHeadword, self).__init__(config)
# self.bert = BertModel(config)
# # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# # self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.qa_outputs = nn.Linear(config.hidden_size, 3)
# self.apply(self.init_bert_weights)
#
# def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
# end_positions=None, headword_positions=None):
# sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
# logits = self.qa_outputs(sequence_output)
# # print (logits)
# # start_logits, end_logits, headwords_logits = logits.split(1, dim=-1)
# start_logits, end_logits, headword_logits = logits.split(1, dim=-1)
# start_logits = start_logits.squeeze(-1)
# end_logits = end_logits.squeeze(-1)
# headword_logits = headword_logits.squeeze(-1)
# if start_positions is not None and end_positions is not None and headword_positions is not None:
# # training
# # If we are on multi-GPU, split add a dimension
# if len(start_positions.size()) > 1:
# start_positions = start_positions.squeeze(-1)
# if len(end_positions.size()) > 1:
# end_positions = end_positions.squeeze(-1)
# if len(headword_positions.size()) > 1:
# headword_positions = end_positions.squeeze(-1)
# # sometimes the start/end positions are outside our model inputs, we ignore these terms
# ignored_index = start_logits.size(1)
# start_positions.clamp_(0, ignored_index)
# end_positions.clamp_(0, ignored_index)
# headword_positions.clamp_(0, ignored_index)
#
# loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
# start_loss = loss_fct(start_logits, start_positions)
# end_loss = loss_fct(end_logits, end_positions)
# headword_loss = loss_fct(headword_logits, headword_positions)
# total_loss = (start_loss + end_loss + headword_loss) / 3
# return total_loss
# else:
# # testing
# return start_logits, end_logits, headword_logits
# bei fei
# class BertForSequenceClassification(PreTrainedBertModel):
# """BERT model for classification.
# This module is composed of the BERT model with a linear layer on top of the pooled output.
#
# Params:
# `config`: a BertConfig class instance with the configuration to build a new model.
# `num_labels`: the number of classes for the classifier. Default = 2.
#
# Inputs:
# `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
# with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
# `extract_features.py`, `run_classifier.py` and `run_squad.py`)
# `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
# types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
# a `sentence B` token (see BERT paper for more details).
# `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
# selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
# input sequence length in the current batch. It's the mask that we typically use for attention when
# a batch has varying length sentences.
# `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
# with indices selected in [0, ..., num_labels].
#
# Outputs:
# if `labels` is not `None`:
# Outputs the CrossEntropy classification loss of the output with the labels.
# if `labels` is `None`:
# Outputs the classification logits of shape [batch_size, num_labels].
#
# Example usage:
# ```python
# # Already been converted into WordPiece token ids
# input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
# input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
# token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
#
# config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
# num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
#
# num_labels = 2
#
# model = BertForSequenceClassification(config, num_labels)
# logits = model(input_ids, token_type_ids, input_mask)
# ```
# """
# def __init__(self, config, num_labels=2):
# super(BertForSequenceClassification, self).__init__(config)
# self.num_labels = num_labels
# self.bert = BertModel(config)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier = nn.Linear(config.hidden_size, num_labels)
# self.apply(self.init_bert_weights)
#
# def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
# #pooled_output is [cls]
# _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
# pooled_output = self.dropout(pooled_output)
# logits = self.classifier(pooled_output)
#
# if labels is not None:
# loss_fct = CrossEntropyLoss()
# loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# return loss
# else:
# return logits
#############################################
# Bei fei
# class BertForQuestionAnswering(PreTrainedBertModel):
# """BERT model for Question Answering (span extraction).
# This module is composed of the BERT model with a linear layer on top of
# the sequence output that computes start_logits and end_logits
#
# Params:
# `config`: either
# - a BertConfig class instance with the configuration to build a new model, or
# - a str with the name of a pre-trained model to load selected in the list of:
# . `bert-base-uncased`
# . `bert-large-uncased`
# . `bert-base-cased`
# . `bert-base-multilingual`
# . `bert-base-chinese`
# The pre-trained model will be downloaded and cached if needed.
#
# Inputs:
# `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
# with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
# `extract_features.py`, `run_classifier.py` and `run_squad.py`)
# `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
# types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
# a `sentence B` token (see BERT paper for more details).
# `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
# selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
# input sequence length in the current batch. It's the mask that we typically use for attention when
# a batch has varying length sentences.
# `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
# Positions are clamped to the length of the sequence and position outside of the sequence are not taken
# into account for computing the loss.
# `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
# Positions are clamped to the length of the sequence and position outside of the sequence are not taken
# into account for computing the loss.
#
# Outputs:
# if `start_positions` and `end_positions` are not `None`:
# Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
# if `start_positions` or `end_positions` is `None`:
# Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
# position tokens of shape [batch_size, sequence_length].
#
# Example usage:
# ```python
# # Already been converted into WordPiece token ids
# input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
# input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
# token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
#
# config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
# num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
#
# model = BertForQuestionAnswering(config)
# start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
# ```
# """
# def __init__(self, config):
# super(BertForQuestionAnswering, self).__init__(config)
# self.bert = BertModel(config)
# # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# # self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.qa_outputs = nn.Linear(config.hidden_size, 2)
# self.apply(self.init_bert_weights)
#
# def forward(self, input_ids, token_type_ids=None,
# attention_mask=None, start_positions=None, end_positions=None):
# sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
# logits = self.qa_outputs(sequence_output)
# # print (logits)
# # start_logits, end_logits, headwords_logits = logits.split(1, dim=-1)
# start_logits, end_logits = logits.split(1, dim=-1)
#
# start_logits = start_logits.squeeze(-1)
# end_logits = end_logits.squeeze(-1)
#
# if start_positions is not None and end_positions is not None:
# # training
# # If we are on multi-GPU, split add a dimension
# if len(start_positions.size()) > 1:
# start_positions = start_positions.squeeze(-1)
# if len(end_positions.size()) > 1:
# end_positions = end_positions.squeeze(-1)
# # sometimes the start/end positions are outside our model inputs, we ignore these terms
# ignored_index = start_logits.size(1)
# start_positions.clamp_(0, ignored_index)
# end_positions.clamp_(0, ignored_index)
#
# loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
# start_loss = loss_fct(start_logits, start_positions)
# end_loss = loss_fct(end_logits, end_positions)
# total_loss = (start_loss + end_loss) / 2
# return total_loss
# else:
# # testing
# return start_logits, end_logits
# class BertForSpanWithHeadwordPrediction(PreTrainedBertModel):
# """BERT model for Question Answering (span extraction).
# This module is composed of the BERT model with a linear layer on top of
# the sequence output that computes start_logits and end_logits
#
# Params:
# `config`: either
# - a BertConfig class instance with the configuration to build a new model, or
# - a str with the name of a pre-trained model to load selected in the list of:
# . `bert-base-uncased`
# . `bert-large-uncased`
# . `bert-base-cased`
# . `bert-base-multilingual`
# . `bert-base-chinese`
# The pre-trained model will be downloaded and cached if needed.
#
# Inputs:
# `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
# with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
# `extract_features.py`, `run_classifier.py` and `run_squad.py`)
# `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
# types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
# a `sentence B` token (see BERT paper for more details).
# `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
# selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
# input sequence length in the current batch. It's the mask that we typically use for attention when
# a batch has varying length sentences.
# `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
# Positions are clamped to the length of the sequence and position outside of the sequence are not taken
# into account for computing the loss.
# `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
# Positions are clamped to the length of the sequence and position outside of the sequence are not taken
# into account for computing the loss.
# 'headword_position': position of the headword for the labeled span: torch.LongTensor of shape [batch_size].
# Positions are clamped to the length of the sequence and position outside of the sequence are not taken
# into account for computing the loss.
#
# Outputs:
# if `start_positions`, `end_positions`, and headword_position are not `None`:
# Outputs the total_loss which is the sum of the CrossEntropy loss for the start, end, and headword token positions.
# if `start_positions` or `end_positions` or headword_position is `None`:
# Outputs a tuple of start_logits, end_logits, and headword_logits which are the logits respectively for the start,
# end and headword token positions tokens of shape [batch_size, sequence_length].
#
# Example usage:
# ```python
# # Already been converted into WordPiece token ids
# input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
# input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
# token_type_ids = torch.LongTensor([[0, 0, 0], [1, 1, 1]])
#
# config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
# num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
#
# model = BertForSpanPrediction(config)
# start_logits, end_logits, headword_logits = model(input_ids, token_type_ids, input_mask)
# ```
# """
#
# def __init__(self, config):
# super(BertForSpanWithHeadwordPrediction, self).__init__(config)
# self.bert = BertModel(config)
# # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# # self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.qa_outputs = nn.Linear(config.hidden_size, 3)
# self.apply(self.init_bert_weights)
#
# def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
# end_positions=None, headword_positions=None):
# sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
# logits = self.qa_outputs(sequence_output)
# # print (logits)
# # start_logits, end_logits, headwords_logits = logits.split(1, dim=-1)
# start_logits, end_logits, headword_logits = logits.split(1, dim=-1)
#
# start_logits = start_logits.squeeze(-1)
# end_logits = end_logits.squeeze(-1)
# headword_logits = headword_logits.squeeze(-1)
#
# if start_positions is not None and end_positions is not None and headword_positions is not None:
# # training
# # If we are on multi-GPU, split add a dimension
# if len(start_positions.size()) > 1:
# start_positions = start_positions.squeeze(-1)
# if len(end_positions.size()) > 1:
# end_positions = end_positions.squeeze(-1)
# if len(headword_positions.size()) > 1:
# headword_positions = end_positions.squeeze(-1)
# # sometimes the start/end positions are outside our model inputs, we ignore these terms
# ignored_index = start_logits.size(1)
# start_positions.clamp_(0, ignored_index)
# end_positions.clamp_(0, ignored_index)
# headword_positions.clamp_(0, ignored_index)
#
# loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
# start_loss = loss_fct(start_logits, start_positions)
# end_loss = loss_fct(end_logits, end_positions)
# headword_loss = loss_fct(headword_logits, headword_positions)
# total_loss = (start_loss + end_loss + headword_loss) / 3
# return total_loss
# else:
# # testing
# return start_logits, end_logits, headword_logits
| 80,774 | 49.962145 | 134 | py |
SPARQA | SPARQA-master/code/parsing/models/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.

    The filename is the SHA-256 hex digest of the UTF-8 encoded url.
    If `etag` is truthy, its hex digest is appended, delimited by a period,
    so different versions of the same resource cache to different files.
    """
    # Fix: annotate the default-None parameter as Optional[str] instead of
    # the implicit-Optional `etag: str = None`.
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename += '.' + sha256(etag.encode('utf-8')).hexdigest()
    return filename
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
    """
    Return the url and etag (which may be ``None``) stored for `filename`.

    The metadata lives next to the cached file in ``<filename>.json``.
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    cache_path = os.path.join(str(cache_dir), filename)
    if not os.path.exists(cache_path):
        raise FileNotFoundError("file {} not found".format(cache_path))
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise FileNotFoundError("file {} not found".format(meta_path))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: fetch through the local download cache.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Existing local file: hand it straight back.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise FileNotFoundError("file {} not found".format(url_or_filename))
    # Unknown scheme (e.g. ftp) and not a local file.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into the bucket name and path.

    Exactly one leading '/' is stripped from the key, so a doubled slash
    keeps one slash at the front of the returned key.
    """
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    bucket, key = parsed.netloc, parsed.path
    if key.startswith("/"):
        key = key[1:]
    return bucket, key
def s3_request(func: Callable):
    """
    Decorator for s3 request functions that converts a 404 ``ClientError``
    into a ``FileNotFoundError`` with a friendlier message; any other
    ``ClientError`` is re-raised unchanged.
    """
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            status = int(exc.response["Error"]["Code"])
            if status == 404:
                raise FileNotFoundError("file {} not found".format(url))
            raise
    return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object (None when the object carries no etag)."""
    bucket_name, s3_path = split_s3_path(url)
    s3_object = boto3.resource("s3").Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3 into the open file object `temp_file`."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource("s3").Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Stream `url` over HTTP into `temp_file`, updating a tqdm progress bar.

    The total is taken from the Content-Length header when the server sends one.
    """
    response = requests.get(url, stream=True)
    length_header = response.headers.get('Content-Length')
    progress = tqdm(unit="B", total=int(length_header) if length_header is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache key is the hash of the URL plus (when available) the hash of
    the server's ETag, so a changed remote file gets a fresh cache entry.
    A ``<cache_path>.json`` metadata file recording url/etag is written
    alongside each cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        # NOTE(review): etag may be None when the server omits the ETag header.
        etag = response.headers.get("ETag")
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line; trailing whitespace is stripped.
    '''
    with open(filename, 'r', encoding='utf-8') as handle:
        return {line.rstrip() for line in handle}
def get_file_extension(path: str, dot=True, lower: bool = True):
    """Return the extension of `path`; drop the leading dot when `dot` is False,
    lower-case the result unless `lower` is False."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
| 8,021 | 32.425 | 98 | py |
SPARQA | SPARQA-master/code/parsing/models/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import argparse
import tensorflow as tf
import torch
import numpy as np
from .modeling import BertConfig, BertForPreTraining
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict on disk.

    Walks every TF variable, maps its slash-separated name onto the matching
    attribute path of a freshly built ``BertForPreTraining``, and copies the
    weights over (transposing dense kernels, which TF stores transposed
    relative to ``nn.Linear``). The result is saved with ``torch.save``.
    """
    config_path = os.path.abspath(bert_config_file)
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {} with config at {}".format(tf_path, config_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = BertForPreTraining(config)
    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # Names like "layer_11" carry an index: split into ("layer", "11").
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # Translate TF variable names to the PyTorch attribute names.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                # Descend into the indexed submodule (e.g. encoder layer list).
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            # TF dense kernels are transposed w.r.t. torch nn.Linear weights.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: convert a TensorFlow BERT checkpoint into a PyTorch
    # model file. All three arguments are mandatory paths.
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--tf_checkpoint_path",
                        default = None,
                        type = str,
                        required = True,
                        help = "Path the TensorFlow checkpoint path.")
    parser.add_argument("--bert_config_file",
                        default = None,
                        type = str,
                        required = True,
                        help = "The config json file corresponding to the pre-trained BERT model. \n"
                            "This specifies the model architecture.")
    parser.add_argument("--pytorch_dump_path",
                        default = None,
                        type = str,
                        required = True,
                        help = "Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
                                     args.bert_config_file,
                                     args.pytorch_dump_path)
| 4,463 | 38.504425 | 101 | py |
SPARQA | SPARQA-master/code/grounding/grounding_utils.py | from common_structs.graph import Graph
from common_structs.grounded_graph import GrounedGraph
from common_structs.depth_first_paths import DepthFirstPaths
from common_structs.graph import Digragh
from common_structs.cycle import DirectedCycle
import mmap
import torch
def posword_wordlist(posword_list):
    '''Extract the lower-cased word (second tab-separated field) from each "POS\\tword" entry.'''
    return [entry.split("\t")[1].lower() for entry in posword_list]
def posword_poslist(posword_list):
    '''Extract the POS tag (first tab-separated field) from each "POS\\tword" entry.'''
    return [entry.split("\t")[0] for entry in posword_list]
# spans of three or more words
def generate_n_gram_indexrange(wordlist):
    '''Index ranges "i\\tj" (inclusive) covering every span of >= 3 words.'''
    size = len(wordlist)
    return {str(i) + "\t" + str(j)
            for i in range(size)
            for j in range(i + 2, size)}
# spans of one or two words
def generate_biunigram_indexrange(wordlist):
    '''Index ranges "i\\tj" (inclusive) for every unigram (j == i) and bigram (j == i + 1).

    Fixes an off-by-one in the original: the loop ran only to size - 2, which
    also gated the unigram insertions, so the last word's unigram span
    (and the unigram of a single-word list) was never produced.
    '''
    indexranges = set()
    size = len(wordlist)
    for i in range(size):
        indexranges.add(str(i) + '\t' + str(i))
        if i + 1 < size:
            indexranges.add(str(i) + "\t" + str(i + 1))
    return indexranges
def merge_dict(dict1, dict2):
    '''Merge two {indexrange: value} dicts, normalising bare keys "i" to "i\\ti".

    dict1 is copied first; a dict2 entry is added only when its *raw* key is
    absent from the merged result, so a bare dict2 key can still overwrite the
    normalised form of the same dict1 key (original semantics preserved).
    '''
    def _normalise(key):
        return key if "\t" in key else "\t".join([key, key])

    merged = dict()
    for key, value in dict1.items():
        merged[_normalise(key)] = value
    for key, value in dict2.items():
        if key in merged:
            continue
        merged[_normalise(key)] = value
    return merged
def get_old_mention(new_mention):
    '''Reconstruct the original surface form of a tokenised mention.

    All '?' characters are removed, then detached punctuation tokens
    ("'s", '.', ',', '...', '!') are re-attached to the preceding token,
    e.g. "Yes we can !" -> "Yes we can!",
    "William DeWitt , Jr." -> "William DeWitt, Jr.".
    '''
    attachable = {'\'s', '.', ',', '...', '!'}
    tokens = new_mention.replace('?', '').split(' ')
    rebuilt = []
    for index, token in enumerate(tokens):
        if index > 0 and token in attachable:
            # Glue the punctuation onto the previous token.
            rebuilt.pop()
            rebuilt.append(tokens[index - 1] + token)
        else:
            rebuilt.append(token)
    return ' '.join(rebuilt)
# sum all four lexicons
def add_dict_number(entity_pro_sum, entity_pro_partial):
    '''Accumulate scores from `entity_pro_partial` into `entity_pro_sum` in place.

    Returns the mutated `entity_pro_sum` for convenience.
    '''
    for entity, score in entity_pro_partial.items():
        if entity in entity_pro_sum:
            entity_pro_sum[entity] = entity_pro_sum[entity] + score
        else:
            entity_pro_sum[entity] = score
    return entity_pro_sum
def get_question_node(nodes):
    '''Return the first question node in `nodes`, or None when there is none.'''
    for node in nodes:
        if is_question_node(node):
            return node
    return None
def is_question_node(node):
    '''A node is the question node when it is a "class" node flagged with question_node == 1.'''
    return node.node_type in ["class"] and node.question_node == 1
def analysis_structure_category(_2_1_graph):
    '''2.1 query graph structure.

    Classifies the ungrounded query graph by the DFS paths that run from the
    question node to each degree-1 leaf node:
      one path of length 2       -> "composition-0"
      one longer path            -> "composition-1"
      two paths                  -> "conjunction"
      anything else              -> "other"
    Returns (category, path_list), or (None, None) when no question node exists.
    '''
    g = Graph()
    for edge in _2_1_graph.edges:
        g.add_edge(edge.start, edge.end)
    question_node = get_question_node(_2_1_graph.nodes)
    if question_node is None:
        return None, None
    # DFS paths rooted at the question node.
    dfp = DepthFirstPaths(g, question_node.nid)
    path_list = []
    for node in _2_1_graph.nodes:
        if node.nid == question_node.nid:
            continue
        # Only leaf nodes (degree <= 1) terminate a structural path.
        if _2_1_graph.get_node_degree(node) > 1:
            continue
        if dfp.has_path_to(node.nid):
            path_to_list = [i for i in dfp.path_to(node.nid)]
            path_list.append(path_to_list)
    category = 'other'  # composition, conjunction
    if len(path_list) == 1:
        if len(path_list[0]) == 2:
            category = "composition-0" #[[1, 2]]
        else:
            category = "composition-1" #[[1, 2, 3, 4]]
    elif len(path_list) == 2:
        category = "conjunction"
    print('#category:\t', category, path_list) ## composition-1 [[1, 2, 3]]
    return category, path_list
def is_undate_ungrounded_graph_cycle(ungrounded_graph):
    '''Cycle breaking: cycles that contain e-e, e-l or l-l edges have to be broken open.
    Cycle cases: event-style questions, e.g. "what were the compositions made by
    bach in 1749"; coordinated objects; coordinated VPs; an interrogative modifier
    phrase attached to the verb. Returns True when the graph contains a cycle.'''
    has_cycle = False
    ungrounded_graph_edges = ungrounded_graph.edges
    di_graph = Digragh()
    for edge in ungrounded_graph_edges:
        # Insert both directions so undirected cycles appear as directed ones.
        # NOTE(review): this also creates trivial a->b->a pairs; DirectedCycle
        # is assumed to ignore those 2-cycles — confirm against its implementation.
        di_graph.add_edge(edge.start, edge.end)
        di_graph.add_edge(edge.end, edge.start)
    directed_cycle = DirectedCycle(di_graph)
    if len(directed_cycle.all_cycles) > 0:
        has_cycle = True
    return has_cycle
def read_literal_to_id_map(mode, file_root):
    '''Load a literal-to-id mapping from `file_root`test_group_2_literal.txt.

    mode == 'cwq':    first column -> 0-based line number, skipping lines
                      whose second column is 'cuowu'.
    mode == 'graphq': second column -> first column.
    '''
    lines = list()
    with open(file_root + 'test_group_2_literal.txt', 'r', encoding='utf-8') as f:
        mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        raw = mm.readline()
        while raw:
            lines.append(raw.decode().strip())
            raw = mm.readline()
        mm.close()
    literal_to_id_dict = dict()
    if mode == 'cwq':
        for line_no, line in enumerate(lines):
            cols = line.split('\t')
            if cols[1] == 'cuowu':
                continue
            literal_to_id_dict[cols[0]] = line_no
    elif mode == 'graphq':
        #11 1.88 6258
        for line in lines:
            cols = line.split('\t')
            literal_to_id_dict[cols[1]] = cols[0]
    return literal_to_id_dict
def candidate_query_to_grounded_graph(candidate_graphquerys):
    '''Wrap each candidate graph-query dict in a GrounedGraph instance.'''
    return [GrounedGraph(type=cq["querytype"],
                         nodes=cq["nodes"],
                         edges=cq["edges"],
                         key_path=cq["path"],
                         denotation=cq['denotation'])
            for cq in candidate_graphquerys]
def convert_2_1_graph_to_qid_entities(_2_1_graph):
    '''Collect [node.id, node.node_type] pairs for every entity node of the graph.'''
    return [[node.id, node.node_type]
            for node in _2_1_graph.nodes
            if node.node_type == 'entity']
def load_word2vec_format(file):
    '''Load a GloVe/word2vec text file into a {word: torch.Tensor(vector)} dict.

    Each line is "word v1 v2 ... vn"; undecodable bytes are ignored.
    '''
    matrix = dict()
    with open(file, errors='ignore', encoding='utf8') as handle:
        for raw_line in handle:
            parts = raw_line.rstrip().split(' ')
            matrix[parts[0]] = torch.Tensor([float(v) for v in parts[1:]])
    return matrix
def extract_class_mention(node_mention, wh_words_set):
    # Drop wh-words before linking, e.g. "What type of government" -> "type of government",
    # then strip the generic "type of" phrase as well.
    kept_words = [word for word in node_mention.split(' ') if word not in wh_words_set]
    mention = ' '.join(kept_words)
    return mention.replace('type of', '').strip()
def read_literal_to_id_map_cwq(file_root):
    '''CWQ variant: map the first column of test_group_2_literal.txt to its
    0-based line number, skipping lines whose second column is 'cuowu'.'''
    lines = list()
    with open(file_root + 'test_group_2_literal.txt', 'r', encoding='utf-8') as f:
        mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        raw = mm.readline()
        while raw:
            lines.append(raw.decode().strip())
            raw = mm.readline()
        mm.close()
    literal_to_id_dict = dict()
    for line_no, line in enumerate(lines):
        cols = line.split('\t')
        if cols[1] == 'cuowu':
            continue
        literal_to_id_dict[cols[0]] = line_no
    return literal_to_id_dict
def read_literal_to_id_map_graphq(file_root):
    '''GraphQuestions variant: map the second column of test_group_2_literal.txt
    to the first column.'''
    lines = list()
    with open(file_root + 'test_group_2_literal.txt', 'r', encoding='utf-8') as f:
        mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        raw = mm.readline()
        while raw:
            lines.append(raw.decode().strip())
            raw = mm.readline()
        mm.close()
    #11 1.88 6258
    literal_to_id_dict = dict()
    for line in lines:
        cols = line.split('\t')
        literal_to_id_dict[cols[1]] = cols[0]
    return literal_to_id_dict
| 9,563 | 33.652174 | 124 | py |
SPARQA | SPARQA-master/code/grounding/ranking/path_match_nn/path_match_interface.py | import torch
from torch.autograd import Variable
from common.globals_args import fn_graph_file, fn_cwq_file, kb_freebase_latest_file, kb_freebase_en_2013, q_mode as mode
from common.hand_files import read_json
from grounding.ranking.path_match_nn.parameters import get_parameters
from grounding.ranking.path_match_nn.wordvec import WordEmbedding
from grounding.ranking.path_match_nn import path_match_word_utils
class PathMatchByLexicalNN():
    """Scores a candidate KB path against a question's important words using a
    pre-trained PathRanking model over word-pair similarities.

    On construction this loads the dataset-specific model checkpoint and the
    relation/type word lexicon (CWQ or GraphQuestions, chosen by the global
    `mode`), plus GloVe embeddings via WordEmbedding.
    """
    def __init__(self):
        self.model_parameters = get_parameters()
        self.define_cuda()
        self.set_model_data()
    def set_model_data(self):
        """Load the dataset-specific model checkpoint, relation-word lexicon and embeddings."""
        assert mode in ['cwq', 'graphq']
        if mode=='cwq':
            # Checkpoint name encodes the training iteration and dev F1 it was saved at.
            model_file=fn_cwq_file.model_file+"_iter_{}_devf1_{}_model.pt".format(2720, 52)
            self.relortype_level_word = read_json(kb_freebase_latest_file.dataset + "relortype_level_words.json")
        elif mode == 'graphq':
            model_file = fn_graph_file.model_file + "_iter_{}_devf1_{}_model.pt".format(570, 48)
            self.relortype_level_word = read_json(kb_freebase_en_2013.dataset + "relortype_level_words.json")
        if self.model_parameters.gpu >= 0:
            self.model = torch.load(model_file,map_location=lambda storage, location: storage.cuda(self.model_parameters.gpu))
        else:
            self.model = torch.load(model_file,map_location=lambda storage, location: storage)
        self.model.eval()
        self.wem = WordEmbedding()
        # self.pretrained_embedding = torch.load(self.model_parameters.vector_cache_file)
        # self.word_dict = torch.load(self.model_parameters.word_dict_file)
        # self.word_pair_sim = torch.load(fn_cwq_file.question_match_dir + 'word_pair_sim.pt')
        # self.pad_index = self.word_dict.lookup(self.word_dict.pad_token)
        # In-memory cache of word-pair similarities, filled lazily per query.
        self.word_pair_sim = dict()
    def define_cuda(self):
        """Seed torch and pin execution to CPU (cuda is hard-disabled here)."""
        self.model_parameters.cuda = False
        torch.manual_seed(self.model_parameters.seed)
        if not self.model_parameters.cuda:
            self.model_parameters.gpu = -1
        if torch.cuda.is_available() and self.model_parameters.cuda:
            print("Note: You are using GPU for training")
            torch.cuda.set_device(self.model_parameters.gpu)
            torch.cuda.manual_seed(self.model_parameters.seed)
        if torch.cuda.is_available() and not self.model_parameters.cuda:
            print("Warning: You have Cuda but do not use it. You are using CPU for training")
    # get the path pro given importantwords
    def get_path_pro(self, candidate, importantwords_list):
        """Score `candidate` (a tab-separated KB path) against the question's
        important words; returns the model's scalar score for the path.

        The positive-side tensors are fed as zeros; the candidate is scored
        through the model's negative branch (the model expects both sides).
        """
        # score=0.0
        # max_relortype_word = 3
        #11 dim
        pos_ques_pathsimmax = torch.Tensor(self.model_parameters.max_question_word).zero_()
        #16 dim
        pos_path_quessimmax = torch.Tensor(self.model_parameters.max_relortype_word).zero_()
        pos_path_len = torch.tensor(0)
        pos_ques_pathsimmax_list = torch.Tensor(1, self.model_parameters.max_question_word)
        pos_path_quessimmax_list = torch.Tensor(1, self.model_parameters.max_relortype_word)
        pos_path_len_list = torch.Tensor(1)
        # pos_ques_path_sim_list[0:end-start]=self.pos_ques_path_sim_list[start:end]
        for j in range(1):
            pos_ques_pathsimmax_list[j] = pos_ques_pathsimmax
        for j in range(1):
            pos_path_quessimmax_list[j] = pos_path_quessimmax
        for j in range(1):
            pos_path_len_list[j] = pos_path_len
        neg_size = 1
        # Pairwise similarity matrix: question words x path words.
        neg_ques_path_sim = torch.Tensor(neg_size, self.model_parameters.max_question_word, self.model_parameters.max_relortype_word).zero_()
        neg_path_quessimmax = torch.Tensor(neg_size, self.model_parameters.max_relortype_word).zero_()
        neg_ques_pathsimmax = torch.Tensor(neg_size, self.model_parameters.max_question_word).zero_()
        neg_path_len = torch.Tensor(neg_size).zero_()
        neg_path_len[0] = torch.tensor(len(candidate.split("\t")))
        firstpart = path_match_word_utils.get_firstparts_by_path(candidate, self.relortype_level_word)
        if len(importantwords_list) > self.model_parameters.max_question_word:
            # importantwords_list = importantwords_by_unimportant(importantwords_list)
            importantwords_list = importantwords_list[:self.model_parameters.max_question_word]
        for i, word in enumerate(importantwords_list):
            if i < self.model_parameters.max_question_word:
                for j, pathword in enumerate(firstpart):
                    if j < self.model_parameters.max_relortype_word:
                        neg_ques_path_sim[0][i][j] = path_match_word_utils.get_word_pair_sim(
                            word1=word, word2=pathword, wem=self.wem, word_pair_sim=self.word_pair_sim)
        # Max-pool the similarity matrix along each axis.
        neg_ques_pathsimmax[0], index = torch.max(neg_ques_path_sim[0], 1)
        neg_path_quessimmax[0], index = torch.max(neg_ques_path_sim[0], 0)
        neg_ques_pathsimmax_list = torch.Tensor(1, neg_size, self.model_parameters.max_question_word)
        neg_path_quessimmax_list = torch.Tensor(1, neg_size, self.model_parameters.max_relortype_word)
        neg_path_len_list = torch.Tensor(1, neg_size)
        for j in range(1):
            neg_ques_pathsimmax_list[j] = neg_ques_pathsimmax
        for j in range(1):
            neg_path_quessimmax_list[j] = neg_path_quessimmax
        for j in range(1):
            neg_path_len_list[j] = neg_path_len
        if self.model_parameters.gpu >= 0:
            pos_ques_pathsimmax_list = pos_ques_pathsimmax_list.cuda()
            pos_path_quessimmax_list = pos_path_quessimmax_list.cuda()
            pos_path_len_list = pos_path_len_list.cuda()
            neg_ques_pathsimmax_list = neg_ques_pathsimmax_list.cuda()
            neg_path_quessimmax_list = neg_path_quessimmax_list.cuda()
            neg_path_len_list = neg_path_len_list.cuda()
        pos_score, neg_score = self.model(
            [Variable(pos_ques_pathsimmax_list), Variable(pos_path_quessimmax_list), Variable(pos_path_len_list),
             Variable(neg_ques_pathsimmax_list), Variable(neg_path_quessimmax_list), Variable(neg_path_len_list)])
        score = neg_score[0].detach().numpy().tolist()[0]
        return score
if __name__=="__main__":
    # Smoke test: score one astronomy path against a set of important words.
    pmnn = PathMatchByLexicalNN()
    print(pmnn.get_path_pro("astronomy.celestial_object.magnitude\tastronomy.celestial_object",["celestial","object","largest","apparent","magnitude"]))
| 6,405 | 52.383333 | 152 | py |
SPARQA | SPARQA-master/code/grounding/ranking/path_match_nn/train_test_path_nn.py | # -*- coding: utf-8 -*-
import torch
import torch.optim as optim
from common.globals_args import root, fn_graph_file
from grounding.ranking.path_match_nn.sequence_loader import SeqRankingLoader
from grounding.ranking.path_match_nn.model import PathRanking
from grounding.ranking.path_match_nn.parameters import get_parameters
# Global run configuration: parse hyper-parameters once at import time and
# decide CPU/GPU placement for the whole training script (cuda hard-disabled).
model_parameters = get_parameters()
model_parameters.cuda=False
torch.manual_seed(model_parameters.seed)
if not model_parameters.cuda:
    model_parameters.gpu = -1
if torch.cuda.is_available() and model_parameters.cuda:
    print("Note: You are using GPU for training")
    torch.cuda.set_device(model_parameters.gpu)
    torch.cuda.manual_seed(model_parameters.seed)
if torch.cuda.is_available() and not model_parameters.cuda:
    print("Warning: You have Cuda but do not use it. You are using CPU for training")
def train(train_file,val_file,model_file):
    """Train the PathRanking model with max-margin ranking loss.

    Loads pre-built tensor batches from `train_file`/`val_file`, evaluates on
    the dev set every `dev_every` iterations, and snapshots the full model to
    `model_file`_iter_<it>_devf1_<acc>_model.pt whenever dev accuracy improves.
    Stops early after `patience` non-improving evaluations.
    """
    model = PathRanking(model_parameters=model_parameters)
    train_loader = SeqRankingLoader(train_file,model_parameters, model_parameters.gpu)
    val_loader = SeqRankingLoader(val_file,model_parameters, model_parameters.gpu)
    if model_parameters.cuda:
        model.cuda()
        print("Shift model to GPU")
    for name, param in model.named_parameters():
        print(name, param.size())
    criterion = torch.nn.MarginRankingLoss(model_parameters.loss_margin) # Max margin ranking loss function
    # print(model_parameters.lr)
    optimizer = optim.Adam(model.parameters(), lr=model_parameters.lr)
    iterations=0
    best_dev_acc=0.0
    iters_not_improved=0
    early_stop=False
    # Patience measured in dev evaluations (effectively very large here).
    patience=1000000*train_loader.batch_num/model_parameters.dev_every
    for epoch in range(1, model_parameters.epochs + 1):
        if early_stop:
            print("Early stopping. Epoch: {}, Best Dev. Acc: {}".format(epoch, best_dev_acc))
            break
        n_correct, n_total = 0, 0
        for batch_idx, batch in enumerate(train_loader.next_batch()):
            iterations+=1
            model.train()
            optimizer.zero_grad()
            pos_score, neg_score = model(batch)
            # A sample counts as correct when the positive path outscores every negative.
            n_correct += (torch.sum(torch.ge(pos_score, neg_score), 1).data == neg_score.size(1)).sum()
            n_total += pos_score.size(0)
            train_acc = 100. * n_correct / n_total
            # target = +1: positive scores should exceed negatives by the margin.
            ones = torch.autograd.Variable(torch.ones(pos_score.size(0) ,pos_score.size(1)))
            if model_parameters.cuda:
                ones = ones.cuda()
            loss = criterion(pos_score, neg_score, ones)
            print('epoch {} batch {}: loss per sentence: {}, accuracy:{}'.format(epoch,batch_idx, loss,train_acc))
            loss.backward()
            torch.nn.utils.clip_grad_norm(model.parameters(), model_parameters.clip_gradient)
            optimizer.step()
            if iterations % model_parameters.dev_every == 0:
                # Periodic dev-set evaluation.
                model.eval()
                total = 0
                correct = 0
                for data_batch in val_loader.next_batch():
                    pos_score, neg_score = model(data_batch)
                    correct += (torch.sum(torch.ge(pos_score, neg_score), 1).data == neg_score.size(1)).sum()
                    total += pos_score.size(0)
                dev_acc = 100. * correct / total
                print('validation accuracy:{}'.format(dev_acc))
                # update model
                if dev_acc > best_dev_acc:
                    best_dev_acc = dev_acc
                    iters_not_improved = 0
                    snapshot_path = model_file+ '_iter_{}_devf1_{}_model.pt'.format(iterations, best_dev_acc)
                    torch.save(model, snapshot_path)
                else:
                    iters_not_improved += 1
                    if iters_not_improved > patience:
                        early_stop = True
                        break
if __name__=='__main__':
    # Train on the GraphQuestions path-ranking data; the commented blocks
    # below switch the run to the ComplexWebQuestions dataset instead.
    # resources_webq = root + '/dataset_cwq_1_1/'
    # data_path_match = resources_webq + 'data_path_match/'
    # model_file = data_path_match+'models/'
    # train_file = data_path_match + "train_pathranking_samestructure.pt"
    # val_file = data_path_match + "valid_pathranking_samestructure.pt"
    # train(train_file, val_file, model_file)
    model_file = fn_graph_file.model_file
    train_file =fn_graph_file.path_match_dir + "train_pathranking_samestructure.pt"
    val_file = fn_graph_file.path_match_dir + "valid_pathranking_samestructure.pt"
    train(train_file, val_file, model_file)
    # model_file = fn_cwq_file.model_file
    # train_file =fn_cwq_file.path_match_dir + "train_pathranking_samestructure.pt"
    # val_file = fn_cwq_file.path_match_dir + "valid_pathranking_samestructure.pt"
    # train(train_file, val_file, model_file)
| 4,703 | 45.574257 | 114 | py |
SPARQA | SPARQA-master/code/grounding/ranking/path_match_nn/wordvec.py | import time
import torch
from grounding.grounding_args import glove_file
from grounding.grounding_utils import load_word2vec_format
class WordEmbedding():
    """Word-embedding lookup backed by pretrained GloVe vectors.

    Out-of-vocabulary words get a random vector drawn uniformly from
    [-scale, scale], where scale is the standard deviation over all
    pretrained vectors; generated vectors are cached so repeated lookups
    of the same word return the same tensor.
    """
    def __init__(self):
        # self.pretrained = dict()
        self.pretrained = load_word2vec_format(glove_file)
        # Cache of randomly initialised vectors for OOV words.
        self.train_generation_embedding = dict()
        # Stack all pretrained vectors to estimate their spread.
        self.ten = torch.Tensor(len(self.pretrained), len(list(self.pretrained.values())[0]))
        for i, val in enumerate(list(self.pretrained.values())):
            self.ten[i] = val
        self.scale = torch.std(self.ten)
        # self.scale =1
    def get_word_embedding(self,word):
        """Return the embedding for `word`; generate and cache a random 300-d vector for OOV words."""
        random_range = (-self.scale, self.scale)
        if word in self.pretrained:
            return self.pretrained[word]
        elif word in self.train_generation_embedding:
            return self.train_generation_embedding[word]
        else:
            self.train_generation_embedding[word]=torch.Tensor(300)
            # NOTE(review): torch.nn.init.uniform is deprecated in favour of uniform_ — confirm torch version.
            torch.nn.init.uniform(self.train_generation_embedding[word],random_range[0], random_range[1])
            return self.train_generation_embedding[word]
| 1,100 | 38.321429 | 105 | py |
SPARQA | SPARQA-master/code/grounding/ranking/path_match_nn/model.py | import torch
from torch import nn
class PathRanking(nn.Module):
    """Pairwise path-ranking scorer.

    fc1 pools the per-question-word max-similarity vector to a scalar,
    fc2 pools the per-path-word max-similarity vector to a scalar, and
    fc3 combines the two scalars into one score. forward() scores the
    positive path and each negative path of a batch.
    """
    def __init__(self,model_parameters):
        super(PathRanking, self).__init__()
        self.model_parameters=model_parameters
        # Pools question-side max similarities (max_question_word -> 1).
        self.fc1 = nn.Sequential(nn.Linear(self.model_parameters.max_question_word, 1))
        # Pools path-side max similarities (max_relortype_word -> 1).
        self.fc2 = nn.Sequential(nn.Linear(self.model_parameters.max_relortype_word, 1))
        # Combines the two pooled scalars into the final score.
        self.fc3 = nn.Sequential(nn.Linear(2, 1))
    # only first part of path
    def forward(self, batch):
        """Return (pos_score, neg_score), both of shape (batch_size, neg_size);
        pos_score is broadcast so each positive is compared against every negative."""
        # pos_pos_ques_path_sim_list:batchsize*max_question_word*max_relortype_word
        # neg_ques_path_sim_list:batchsize*neg_size*max_question_word*max_relortype_word
        # pos_path_len_list:batchsize (len(positive_path.split("\t")))
        # neg_path_len_list:batchsize*neg_size (len(positive_path.split("\t")))
        # ques_index_list:batchsize*max_question_word
        pos_ques_pathsimmax_list, pos_path_quessimmax_list, pos_path_len_list,\
        neg_ques_pathsimmax_list, neg_path_quessimmax_list, neg_path_len_list = batch
        batch_size = len(pos_ques_pathsimmax_list)
        neg_size = len(neg_path_len_list[0])
        # print(pos_ques_pathsimmax_list.shape)
        pos_ques_z1=self.fc1(pos_ques_pathsimmax_list).squeeze(1)
        pos_path_z2=self.fc2(pos_path_quessimmax_list).squeeze(1)
        # print(pos_ques_z1.shape)
        # Stack the two pooled scalars as a (batch, 2) input for fc3.
        # NOTE(review): the stack tensor is built on CPU and moved with .cuda()
        # afterwards — confirm this matches the device of fc3 in GPU runs.
        pos_z1_z2 = torch.Tensor(2,len(pos_ques_z1))
        pos_z1_z2[0]=pos_ques_z1
        pos_z1_z2[1]=pos_path_z2
        pos_z1_z2=pos_z1_z2.transpose(0,1)
        if self.model_parameters.cuda:
            pos_z1_z2 = pos_z1_z2.cuda()
        pos_score=self.fc3(pos_z1_z2).squeeze(1)
        # print(pos_score.shape)
        # print(pos_path_len_list.shape)
        # pos_score=pos_score/pos_path_len_list
        # print(pos_score.shape)
        pos_score = pos_score.view(batch_size,1).expand(batch_size, neg_size)
        neg_ques_z1 = self.fc1(neg_ques_pathsimmax_list).squeeze(2)
        neg_path_z2 = self.fc2(neg_path_quessimmax_list).squeeze(2)
        # neg_z1_z2 = torch.Tensor(len(neg_ques_z1),len(neg_ques_z1[0]), 2)
        neg_z1_z2 = torch.Tensor(2, len(pos_ques_z1),len(neg_ques_z1[0]),)
        neg_z1_z2[0] = neg_ques_z1
        neg_z1_z2[1] = neg_path_z2
        neg_z1_z2 = neg_z1_z2.transpose(0, 1)
        neg_z1_z2 = neg_z1_z2.transpose(1, 2)
        if self.model_parameters.cuda:
            neg_z1_z2 = neg_z1_z2.cuda()
        neg_score = self.fc3(neg_z1_z2).squeeze(2)
        # print(neg_score.shape)
        # neg_score = neg_score / neg_path_len_list
        return pos_score, neg_score
| 2,577 | 45.872727 | 89 | py |
SPARQA | SPARQA-master/code/grounding/ranking/path_match_nn/sequence_loader.py | import torch
from torch.autograd import Variable
class SeqRankingLoader():
    def __init__(self, infile, model_parameters,device=-1):
        """Load pre-built positive/negative similarity tensors from `infile`
        (a torch.save'd 6-tuple) and derive batching parameters.

        device >= 0 selects a CUDA device for the batches; -1 keeps them on CPU.
        """
        self.pos_ques_pathsimmax_list, self.pos_path_quessimmax_list, self.pos_path_len_list,\
        self.neg_ques_pathsimmax_list, self.neg_path_quessimmax_list, self.neg_path_len_list= torch.load(infile)
        # print(len(self.neg_path_quessimmax_list))
        self.model_parameters=model_parameters
        self.batch_size = model_parameters.batch_size
        # +1 so a trailing partial batch is still served by next_batch().
        self.batch_num = int(len(self.pos_ques_pathsimmax_list)/self.batch_size)+1
        print("batch_num",self.batch_num)
        self.device=device
def next_batch(self, shuffle=True):
device = self.device
if shuffle:
indices = torch.randperm(self.batch_num)
else:
indices = range(self.batch_num)
indices = indices.numpy()
for i in indices:
if i * self.batch_size < len(self.pos_ques_pathsimmax_list):
start = i * self.batch_size
end = (i + 1) * self.batch_size
if end > len(self.pos_ques_pathsimmax_list):
end = len(self.pos_ques_pathsimmax_list)
# print(end)
# print(start)
# print(i)
pos_ques_pathsimmax_list = torch.Tensor(end - start, self.model_parameters.max_question_word)
pos_path_quessimmax_list = torch.Tensor(end - start, self.model_parameters.max_relortype_word)
pos_path_len_list = torch.Tensor(end - start)
neg_ques_pathsimmax_list = torch.Tensor(end - start, self.model_parameters.neg_size,self.model_parameters.max_question_word)
neg_path_quessimmax_list = torch.Tensor(end - start,self.model_parameters.neg_size, self.model_parameters.max_relortype_word)
neg_path_len_list = torch.Tensor(end - start, self.model_parameters.neg_size)
# pos_ques_path_sim_list[0:end-start]=self.pos_ques_path_sim_list[start:end]
for j in range(end - start):
pos_ques_pathsimmax_list[j] = self.pos_ques_pathsimmax_list[start + j]
for j in range(end - start):
pos_path_quessimmax_list[j] = self.pos_path_quessimmax_list[start + j]
for j in range(end - start):
pos_path_len_list[j] = self.pos_path_len_list[start + j]
for j in range(end - start):
neg_ques_pathsimmax_list[j] = self.neg_ques_pathsimmax_list[start + j]
for j in range(end - start):
neg_path_quessimmax_list[j] = self.neg_path_quessimmax_list[start + j]
for j in range(end - start):
neg_path_len_list[j] = self.neg_path_len_list[start + j]
if device >= 0:
pos_ques_pathsimmax_list = pos_ques_pathsimmax_list.cuda()
pos_path_quessimmax_list = pos_path_quessimmax_list.cuda()
pos_path_len_list = pos_path_len_list.cuda()
neg_ques_pathsimmax_list = neg_ques_pathsimmax_list.cuda()
neg_path_quessimmax_list = neg_path_quessimmax_list.cuda()
neg_path_len_list = neg_path_len_list.cuda()
yield Variable(pos_ques_pathsimmax_list), Variable(pos_path_quessimmax_list), Variable(pos_path_len_list)\
,Variable(neg_ques_pathsimmax_list), Variable(neg_path_quessimmax_list), Variable(neg_path_len_list)
| 3,553 | 54.53125 | 141 | py |
SPARQA | SPARQA-master/code/grounding/ranking/path_match_nn/path_match_word_utils.py | import collections
import torch
from sklearn.metrics.pairwise import cosine_similarity
def get_qid_abstractquestion(any_2_1):
    """Map each question id (as str) to the set of its 'abstract' questions,
    built by replacing every entity node's friendly name with the token <e>."""
    print(len(any_2_1))
    qid_abstractquestions = collections.defaultdict(set)
    for structure in any_2_1:
        question = structure.question
        for ungrounded_graph in structure.ungrounded_graph_forest:
            abstract = question
            for node in ungrounded_graph.nodes:
                if node.node_type == 'entity':
                    abstract = abstract.replace(node.friendly_name, '<e>')
            qid_abstractquestions[str(structure.qid)].add(abstract)
    return qid_abstractquestions
def get_word_pair_sim_without_memory(word1, word2, wem):
    """Similarity of two words as a scalar tensor: the larger of the 0/1
    same-lemma heuristic and the cosine similarity of their embeddings."""
    emb1 = wem.get_word_embedding(word1).numpy()
    emb2 = wem.get_word_embedding(word2).numpy()
    cosine = cosine_similarity([emb1, emb2])[0][1]
    sim = max(judge_twowords_samelemma(word1, word2), cosine)
    return torch.tensor(float(sim))
def get_word_pair_sim(word1, word2, wem, word_pair_sim):
    """Memoized word-pair similarity.

    Results are cached in the caller-supplied ``word_pair_sim`` dict under the
    key ``word1 + '###' + word2`` (the key is direction-sensitive).
    The similarity itself is computed by :func:`get_word_pair_sim_without_memory`;
    the original duplicated that function's body here, which has been removed.
    """
    key = word1 + '###' + word2
    if key in word_pair_sim:
        return word_pair_sim[key]
    sim = get_word_pair_sim_without_memory(word1, word2, wem)
    word_pair_sim[key] = sim
    return sim
def get_firstparts_by_path(path, relortype_level_word):
    """For every tab-separated column of *path* found in *relortype_level_word*,
    collect up to the first three level-"0" words, then one level-"1" word
    (preferring one not already collected)."""
    words = []
    for col in path.split("\t"):
        levels = relortype_level_word.get(col)
        if levels is None:
            continue
        if "0" in levels:
            # Slicing to 3 covers both the >3 and <=3 branches of the original.
            words.extend(levels["0"][:3])
        if "1" in levels:
            level1 = levels["1"]
            unseen = set(level1) - set(words)
            # When several unseen words exist, an arbitrary one is taken
            # (set iteration order), exactly as in the original.
            words.append(next(iter(unseen)) if unseen else level1[0])
    return words
def judge_twowords_samelemma(word1, word2):
    """Heuristically decide whether two words share a lemma.

    Returns 1 when both words are longer than one character and the length of
    their common prefix is at least 3/7 of the longer word's length; else 0.
    """
    shorter = min(len(word1), len(word2))
    longer = max(len(word1), len(word2))
    if shorter <= 1:
        return 0
    common = 0
    for ch1, ch2 in zip(word1, word2):
        if ch1 != ch2:
            break
        common += 1
    return 1 if common / longer >= 3 / 7 else 0
| 2,622 | 37.014493 | 164 | py |
SPARQA | SPARQA-master/code/grounding/ranking/path_match_nn/preproccess_freebase.py | import os
import copy
import torch
from common.globals_args import fn_cwq_file, fn_graph_file, root, argument_parser, kb_freebase_latest_file, kb_freebase_en_2013
from common.hand_files import read_json, write_json, read_structure_file
import random
from grounding.ranking.path_match_nn.wordvec import WordEmbedding
from datasets_interface.question_interface.questions_utils import extract_grounded_graph_from_jena_freebase
from grounding.ranking.path_match_nn import path_match_word_utils
from grounding.ranking.path_match_nn import wordvec
from parsing import parsing_utils
from grounding.ranking.path_match_nn.parameters import get_parameters
model_parameters = get_parameters()
def conquer_cwq():
    """Pipeline driver for ComplexWebQuestions.

    Step 1 gathers candidate files and builds training pairs, step 2 turns
    them into similarity tensors, step 3 splits them into train/valid.
    Steps 1 and 2 are currently commented out; only step 3 runs.
    """
    '''1'''
    output_path = fn_cwq_file.dataset
    output_folder_name = '/2019.04.12_cwq/'
    output_file_folder = output_path + output_folder_name
    input_file_folders=['2.2_train_oracle_0_500','2.2_train_oracle_500_1000','2.2_train_oracle_1000_1500','2.2_train_oracle_1500_2000',
                        '2.2_train_oracle_2000_2500', '2.2_train_oracle_2500_3000', '2.2_train_oracle_3000_3500']
    # Collect every candidate-structure file under the oracle folders.
    # NOTE(review): all_files is only consumed by the commented-out step 1 below.
    all_files=list()
    for file in input_file_folders:
        infiles=os.listdir(output_file_folder + file)
        for infile in infiles:
            all_files.append(output_file_folder+file+'/'+infile)
    #
    # train_qid_to_grounded_graph_dict = complexwebquestion_interface.extract_grounded_graph_from_jena(globals_args.fn_cwq_file.complexwebquestion_train_bgp_dir)
    # property_level_words = read_json(fn_cwq_file.freebase + "property_level_words.json")
    # train_structure_with_2_1_grounded_graph_file = output_path + '/2019.05.01_cwq' + '/2.1/' + 'structures_with_2_1_ungrounded_graphs_train_0501_multi.json'
    # train_2_1 = read_structure_file(train_structure_with_2_1_grounded_graph_file)
    # train_qid_abstractquestions = get_qid_abstractquestion(train_2_1)
    # train_data_generation_samestructure(train_qid_to_grounded_graph_dict,list(property_level_words.keys()),all_files,train_qid_abstractquestions)
    '''2'''
    # trainorval_data=read_json(fn_cwq_file.path_match_dir + "data_for_trainorval_list_samestructure.json")
    # wem = WordEmbedding()
    # property_level_words = read_json(fn_cwq_file.freebase + "property_level_words.json")
    # create_data_for_trainorval(trainorval_data=trainorval_data,relortype_level_word=property_level_words,wem=wem,save_path=fn_cwq_file.path_match_dir+'trainorval_pathranking_samestructure.pt')
    '''3'''
    # Split the preprocessed tensors 4/5 train, 1/5 validation.
    infile = fn_cwq_file.path_match_dir + "trainorval_pathranking_samestructure.pt"
    out_file1 = fn_cwq_file.path_match_dir + "train_pathranking_samestructure.pt"
    out_file2 = fn_cwq_file.path_match_dir + "valid_pathranking_samestructure.pt"
    divide_train_val(infile, out_file1, out_file2)
def conquer_graphq():
    """Pipeline driver for GraphQuestions.

    Same three steps as :func:`conquer_cwq`; steps 1 and 2 are commented out,
    only the train/valid split (step 3) runs.
    """
    '''1'''
    # output_path = fn_graph_file.dataset
    # # output_path = globals_args.argument_parser.output
    # output_folder_name = '/output_graphq/'
    # output_file_folder = output_path + output_folder_name
    # input_file_folders=['2.2_train']
    # all_files = list()
    # for file in input_file_folders:
    #     infiles = os.listdir(output_file_folder + file)
    #     for infile in infiles:
    #         all_files.append(output_file_folder+file+'/'+infile)
    # property_level_words = read_json(kb_freebase_en_2013.dataset + "/relortype_level_words.json")
    # # property_level_words = read_json(fn_graph_file.dataset + "/dataset_freebase_graphq/relortype_level_words.json")
    # qid_abstractquestions = read_json(fn_graph_file.question_match_dir+'qid_abstractquestion.json')
    # train_data_generation_samestructure_graphq(list(property_level_words.keys()),all_files, qid_abstractquestions)
    '''2'''
    # trainorval_data=read_json(fn_graph_file.path_match_dir + "data_for_trainorval_list_samestructure.json")
    # wem = WordEmbedding()
    # property_level_words = read_json(kb_freebase_en_2013.dataset + "/relortype_level_words.json")
    # # property_level_words = read_json(fn_graph_file.dataset + "/dataset_freebase_graphq/relortype_level_words.json")
    # create_data_for_trainorval(trainorval_data=trainorval_data, relortype_level_word=property_level_words,
    #                            wem=wem, save_path=fn_graph_file.path_match_dir+'trainorval_pathranking_samestructure.pt')
    '''3'''
    # Split the preprocessed tensors 4/5 train, 1/5 validation.
    infile = fn_graph_file.path_match_dir + "trainorval_pathranking_samestructure.pt"
    out_file1 = fn_graph_file.path_match_dir + "train_pathranking_samestructure.pt"
    out_file2 = fn_graph_file.path_match_dir + "valid_pathranking_samestructure.pt"
    divide_train_val(infile, out_file1, out_file2)
def conquer_cwq_0904():
    """2019-09-04 variant of the CWQ pipeline using the dataset_cwq_1_1 layout.

    Steps 1 and 2 are commented out; only the train/valid split runs.
    NOTE(review): train_cwq_bgp_filepath and
    train_structure_with_2_1_grounded_graph_file are only used by the
    commented-out steps.
    """
    resources_cwq = root + '/dataset_cwq_1_1/'
    data_path_match = resources_cwq + 'data_path_match/'
    train_cwq_bgp_filepath = resources_cwq + '/ComplexWebQuestions_train_bgp.txt'
    # output_path = argument_parser.output
    # output_file_folder = output_path + '/2019.06.03_webq'
    output_path = resources_cwq + 'output_cwq'
    train_structure_with_2_1_grounded_graph_file = output_path + '/2.1/' + 'structures_with_2_1_grounded_graph_all_train_head_0901_0_15000.json'
    '''1'''
    # input_file_folder = output_path + '/2.2_train/'
    # all_files = list()
    # infiles = os.listdir(input_file_folder)
    # for infile in infiles:
    #     all_files.append(input_file_folder+'/'+infile)
    # train_qid_to_grounded_graph_dict = extract_grounded_graph_from_jena_freebase(train_cwq_bgp_filepath)
    # # property_level_words = read_json(fn_cwq_file.freebase + "property_level_words.json")
    # property_level_words = read_json(kb_freebase_latest_file.dataset + "property_level_words.json")
    # train_2_1 = read_structure_file(train_structure_with_2_1_grounded_graph_file)
    # train_qid_abstractquestions = path_match_word_utils.get_qid_abstractquestion(train_2_1)
    # train_data_generation_samestructure_wq(
    #     train_qid_to_grounded_graph_dict, list(property_level_words.keys()),
    #     all_files, train_qid_abstractquestions, mode='wq')
    '''2'''
    # trainorval_data = read_json(data_path_match + "data_for_trainorval_list_samestructure.json")
    # wem = wordvec.WordEmbedding()
    # # property_level_words = read_json(fn_cwq_file.freebase + "property_level_words.json")
    # property_level_words = read_json(kb_freebase_latest_file.dataset + "property_level_words.json")
    # create_data_for_trainorval(
    #     trainorval_data=trainorval_data, relortype_level_word=property_level_words,
    #     wem=wem, save_path=data_path_match+'trainorval_pathranking_samestructure.pt')
    '''3'''
    # Split the preprocessed tensors 4/5 train, 1/5 validation.
    infile =data_path_match+ "trainorval_pathranking_samestructure.pt"
    out_file1 = data_path_match + "train_pathranking_samestructure.pt"
    out_file2 = data_path_match + "valid_pathranking_samestructure.pt"
    divide_train_val(infile, out_file1, out_file2)
def train_data_generation_samestructure_graphq(propertys,files,qid_abstractquestions):
    """Build (abstract question, gold path, negatives) records for GraphQuestions.

    For each candidate-structure file, gold paths come from the structure's
    ``gold_sparql_query`` edges. Negatives are candidate key paths with the
    same number of predicates as the gold path but different (sorted) content,
    padded with random predicate combinations from *propertys* up to
    ``model_parameters.neg_size``. Results are written to
    data_for_trainorval_list_samestructure.json under the GraphQ path-match dir.
    """
    data_for_train_list = list()
    for i,file in enumerate(files):
        print(i,file)
        data=read_structure_file(file)
        # The file name (without extension) is the question id.
        qid=file.split('/')[-1].split('.')[0]
        if len(qid_abstractquestions[qid])==0:
            continue
        negatives=list()
        j=0
        # join=True
        for structure in data:
            gold_path = []
            predicates = []
            # for edge in structure.gold_graph_query.edges:
            #     gold_path.append(edge.relation)
            #     predicates.append(edge.relation)
            for edge in structure.gold_sparql_query['edges']:
                gold_path.append(edge['relation'])
                predicates.append(edge['relation'])
            # Sort so paths compare independently of edge order.
            gold_path.sort()
            gold_path = '\t'.join(gold_path)
            for ungrounded_graph in structure.ungrounded_graph_forest:
                for grounded_graph in ungrounded_graph.grounded_graph_forest:
                    path=grounded_graph.key_path
                    ps=path.split('\t')
                    ps.sort()
                    path='\t'.join(ps)
                    # Same structure (predicate count) as gold, but different path.
                    if j < model_parameters.neg_size and len(ps) == len(predicates) and path!=gold_path:
                        negatives.append(path)
                        j += 1
        if j>0:
            if j < model_parameters.neg_size:
                # Pad with random predicate combinations until neg_size is reached.
                # NOTE(review): the loop variable ``i`` below shadows the outer file index.
                while j < model_parameters.neg_size:
                    candidate = list()
                    for i in range(len(predicates)):
                        candidate.append(propertys[random.randint(0, len(propertys) - 1)])
                    candidate.sort()
                    candidate = "\t".join(candidate)
                    if candidate != gold_path and candidate not in negatives:
                        negatives.append(candidate)
                        j += 1
            one=dict()
            one["qid"] = qid
            # Stores the whole collection of abstract questions for this qid.
            one["abstractquestion"] = (qid_abstractquestions[qid])
            one["gold_path"] = gold_path
            one["negatives"] = negatives
            data_for_train_list.append(one)
        else:
            print('not join',qid)
    write_json(data_for_train_list, fn_graph_file.path_match_dir + "data_for_trainorval_list_samestructure.json")
def train_data_generation_samestructure(train_qid_to_grounded_graph_dict,propertys,files,train_qid_abstractquestions,mode='cwq'):
    """Build (abstract question, gold path, negatives) records for CWQ.

    Gold paths come from the grounded-graph dict keyed by question id; negatives
    are same-length candidate key paths, padded with random predicate
    combinations up to ``model_parameters.neg_size``.
    NOTE(review): results are written out only when ``mode == 'cwq'``; for any
    other mode the collected records are silently discarded — confirm intended.
    """
    data_for_train_list = list()
    for i,file in enumerate(files):
        print(i,file)
        data=read_structure_file(file)
        # The file name (without extension) is the question id.
        qid=file.split('/')[-1].split('.')[0]
        if len(train_qid_abstractquestions[qid])==0:
            continue
        elif len(list(train_qid_abstractquestions[qid])[0])==0:
            continue
        gold_graph=train_qid_to_grounded_graph_dict[qid]
        predicates = []
        for edge in gold_graph.edges:
            predicates.append(edge.friendly_name)
        # Sort so paths compare independently of edge order.
        predicates.sort()
        gold_path = '\t'.join(predicates)
        negatives=list()
        j=0
        # join=True
        for structure in data:
            for ungrounded_graph in structure.ungrounded_graph_forest:
                for grounded_graph in ungrounded_graph.grounded_graph_forest:
                    path=grounded_graph.key_path
                    ps=path.split('\t')
                    ps.sort()
                    path='\t'.join(ps)
                    # Same structure (predicate count) as gold, but different path.
                    if j < model_parameters.neg_size and len(ps) == len(predicates) and path!=gold_path:
                        negatives.append(path)
                        j += 1
        if j>0:
            if j < model_parameters.neg_size:
                # Pad with random predicate combinations until neg_size is reached.
                # NOTE(review): the loop variable ``i`` below shadows the outer file index.
                while j < model_parameters.neg_size:
                    candidate = list()
                    for i in range(len(predicates)):
                        candidate.append(propertys[random.randint(0, len(propertys) - 1)])
                    candidate.sort()
                    candidate = "\t".join(candidate)
                    if candidate != gold_path and candidate not in negatives:
                        negatives.append(candidate)
                        j += 1
            one=dict()
            one["qid"] = qid
            # Stores only the first abstract question (set iteration order).
            one["abstractquestion"] = list(train_qid_abstractquestions[qid])[0]
            one["gold_path"] = gold_path
            one["negatives"] = negatives
            data_for_train_list.append(one)
        else:
            print('not join',qid)
    if mode=='cwq':
        write_json(data_for_train_list,
                   fn_cwq_file.path_match_dir + "data_for_trainorval_list_samestructure.json")
def train_data_generation_samestructure_wq(train_qid_to_grounded_graph_dict, propertys, files, train_qid_abstractquestions, mode='cwq'):
    """WebQuestions/CWQ-1.1 variant of :func:`train_data_generation_samestructure`.

    Differences: question ids missing from the grounded-graph dict are skipped
    with a message, and results are always written to the dataset_cwq_1_1
    path-match directory. NOTE(review): the ``mode`` parameter is unused here.
    """
    data_for_train_list = list()
    for i, file in enumerate(files):
        print(i, file)
        data = read_structure_file(file)
        # The file name (without extension) is the question id.
        qid = file.split('/')[-1].split('.')[0]
        if len(train_qid_abstractquestions[qid]) == 0:
            continue
        elif len(list(train_qid_abstractquestions[qid])[0]) == 0:
            continue
        # if 'WebQTrn-'+str(qid) not in train_qid_to_grounded_graph_dict:
        #     print('do not exist: WebQTrn-'+str(qid))
        #     continue
        # gold_graph = train_qid_to_grounded_graph_dict['WebQTrn-'+str(qid)]
        if qid not in train_qid_to_grounded_graph_dict:
            print('do not exist: '+ qid)
            continue
        gold_graph = train_qid_to_grounded_graph_dict[qid]
        predicates = []
        for edge in gold_graph.edges:
            predicates.append(edge.friendly_name)
        # Sort so paths compare independently of edge order.
        predicates.sort()
        gold_path = '\t'.join(predicates)
        negatives=list()
        j=0
        for structure in data:
            for ungrounded_graph in structure.ungrounded_graph_forest:
                for grounded_graph in ungrounded_graph.grounded_graph_forest:
                    #path
                    path = grounded_graph.key_path
                    ps = path.split('\t')
                    ps.sort()
                    path = '\t'.join(ps)
                    # Same structure (predicate count) as gold, but different path.
                    if j < model_parameters.neg_size and len(ps) == len(predicates) and path != gold_path:
                        negatives.append(path)
                        j += 1
        if j>0:
            if j < model_parameters.neg_size:
                # Pad with random predicate combinations until neg_size is reached.
                # NOTE(review): the loop variable ``i`` below shadows the outer file index.
                while j < model_parameters.neg_size:
                    candidate = list()
                    for i in range(len(predicates)):
                        candidate.append(propertys[random.randint(0, len(propertys) - 1)])
                    candidate.sort()
                    candidate = "\t".join(candidate)
                    if candidate != gold_path \
                            and candidate not in negatives:
                        negatives.append(candidate)
                        j += 1
            one = dict()
            one["qid"] = qid
            # Stores only the first abstract question (set iteration order).
            one["abstractquestion"] = list(train_qid_abstractquestions[qid])[0]
            one["gold_path"] = gold_path
            one["negatives"] = negatives
            data_for_train_list.append(one)
        else:
            print('not join', qid)
    write_json(data_for_train_list, root + '/dataset_cwq_1_1/data_path_match/data_for_trainorval_list_samestructure.json')
def create_data_for_trainorval(trainorval_data,relortype_level_word,wem,save_path):
    """Turn the JSON training records into word-similarity tensors and save them.

    For each record, builds a (max_question_word x max_relortype_word) matrix of
    word-pair similarities between the abstract question's important words and
    the path's first-part words, then keeps the row-wise and column-wise maxima
    for the positive path and each of the neg_size negative paths. Saves a
    7-tuple of parallel lists (positive/negative maxima, path lengths, qids)
    via ``torch.save`` to *save_path*.
    """
    qid_list=[]
    pos_ques_pathsimmax_list=[]
    pos_path_quessimmax_list=[]
    neg_ques_pathsimmax_list = []
    neg_path_quessimmax_list = []
    pos_path_len_list=[]
    neg_path_len_list=[]
    # NOTE(review): ``index`` is re-bound below by the torch.max unpacking,
    # shadowing this enumerate counter.
    for index, one in enumerate(trainorval_data):
        print(one["qid"])
        positive_ques_path_sim=torch.Tensor(
            model_parameters.max_question_word, model_parameters.max_relortype_word).zero_()
        pos_ques_pathsimmax=torch.Tensor(
            model_parameters.max_question_word).zero_()
        pos_path_quessimmax=torch.Tensor(
            model_parameters.max_relortype_word).zero_()
        abstractquestion = one["abstractquestion"]
        importantwords_list = parsing_utils.get_importantwords_byabstractquestion(abstractquestion)
        if len(importantwords_list)==0:
            continue
        positive_path = one["gold_path"]
        candidates = one["negatives"]
        positive_path_firstpart = path_match_word_utils.get_firstparts_by_path(positive_path, relortype_level_word)
        # Pairwise similarity between question words and positive-path words.
        for i, word in enumerate(importantwords_list):
            if i < model_parameters.max_question_word:
                for j, pathword in enumerate(positive_path_firstpart):
                    if j < model_parameters.max_relortype_word:
                        positive_ques_path_sim[i][j] = path_match_word_utils.get_word_pair_sim_without_memory(word, pathword, wem)
                    else:
                        print("goldpath>max_relortype_word", one["qid"])
        # Keep per-question-word and per-path-word maxima of the sim matrix.
        pos_ques_pathsimmax,index=torch.max(positive_ques_path_sim,1)
        pos_path_quessimmax,index=torch.max(positive_ques_path_sim,0)
        pos_path_len = torch.tensor(len(positive_path.split("\t")))
        pos_ques_pathsimmax_list.append(pos_ques_pathsimmax)
        pos_path_quessimmax_list.append(pos_path_quessimmax)
        pos_path_len_list.append(pos_path_len)
        # Same computation for every negative candidate path.
        neg_ques_path_sim = torch.Tensor(model_parameters.neg_size,model_parameters.max_question_word,model_parameters.max_relortype_word).zero_()
        neg_path_quessimmax = torch.Tensor(model_parameters.neg_size,model_parameters.max_relortype_word).zero_()
        neg_ques_pathsimmax = torch.Tensor(model_parameters.neg_size,model_parameters.max_question_word).zero_()
        neg_path_len=torch.Tensor(model_parameters.neg_size).zero_()
        for k,candidate in enumerate(candidates):
            neg_path_len[k]=torch.tensor(len(candidate.split("\t")))
            firstpart = path_match_word_utils.get_firstparts_by_path(candidate, relortype_level_word)
            for i, word in enumerate(importantwords_list):
                if i < model_parameters.max_question_word:
                    for j,pathword in enumerate(firstpart):
                        if j < model_parameters.max_relortype_word:
                            sim = path_match_word_utils.get_word_pair_sim_without_memory(word,pathword, wem)
                            neg_ques_path_sim[k][i][j]=sim
            neg_ques_pathsimmax[k],index = torch.max(neg_ques_path_sim[k],1)
            neg_path_quessimmax[k],index = torch.max(neg_ques_path_sim[k],0)
        neg_ques_pathsimmax_list.append(neg_ques_pathsimmax)
        neg_path_quessimmax_list.append(neg_path_quessimmax)
        neg_path_len_list.append(neg_path_len)
        qid_list.append(str(one["qid"]))
    torch.save((pos_ques_pathsimmax_list, pos_path_quessimmax_list, pos_path_len_list,
                neg_ques_pathsimmax_list, neg_path_quessimmax_list,
                neg_path_len_list, qid_list), save_path)
def divide_train_val(infile, out_file1, out_file2):
    """Split a preprocessed ranking dataset into train (4/5) and validation (1/5).

    The split is done on shuffled question ids, so every example sharing a qid
    lands in the same partition. *infile* holds the seven parallel lists saved
    by ``create_data_for_trainorval``; each output file holds six lists (the
    qid list is dropped). Improvement: qid membership is now tested against a
    set instead of scanning a list for every example (O(1) vs O(n)).
    """
    (pos_ques_pathsimmax_list, pos_path_quessimmax_list, pos_path_len_list,
     neg_ques_pathsimmax_list, neg_path_quessimmax_list, neg_path_len_list,
     qid_list) = torch.load(infile)
    qid_shuffle_list = copy.deepcopy(qid_list)
    print(qid_shuffle_list)
    random.shuffle(qid_shuffle_list)
    print(qid_shuffle_list)
    train_size = int(len(qid_shuffle_list) / 5 * 4)
    # Set for O(1) membership tests in the loop below.
    train_qids = set(qid_shuffle_list[0:train_size])
    train_pos_ques, train_pos_path, train_pos_len = [], [], []
    train_neg_ques, train_neg_path, train_neg_len = [], [], []
    val_pos_ques, val_pos_path, val_pos_len = [], [], []
    val_neg_ques, val_neg_path, val_neg_len = [], [], []
    for i, qid in enumerate(qid_list):
        if qid in train_qids:
            print(qid, "train")
            train_pos_ques.append(pos_ques_pathsimmax_list[i])
            train_pos_path.append(pos_path_quessimmax_list[i])
            train_pos_len.append(pos_path_len_list[i])
            train_neg_ques.append(neg_ques_pathsimmax_list[i])
            train_neg_path.append(neg_path_quessimmax_list[i])
            train_neg_len.append(neg_path_len_list[i])
        else:
            print(qid, "val")
            val_pos_ques.append(pos_ques_pathsimmax_list[i])
            val_pos_path.append(pos_path_quessimmax_list[i])
            val_pos_len.append(pos_path_len_list[i])
            val_neg_ques.append(neg_ques_pathsimmax_list[i])
            val_neg_path.append(neg_path_quessimmax_list[i])
            val_neg_len.append(neg_path_len_list[i])
    torch.save((train_pos_ques, train_pos_path, train_pos_len,
                train_neg_ques, train_neg_path, train_neg_len), out_file1)
    torch.save((val_pos_ques, val_pos_path, val_pos_len,
                val_neg_ques, val_neg_path, val_neg_len), out_file2)
if __name__=='__main__':
    # Entry point: currently runs the GraphQuestions pipeline; the other
    # drivers are kept (commented) for reference.
    conquer_graphq()
    # pass
    # conquer()
    # random.shuffle(['a','bc'])
| 20,297 | 49.618454 | 202 | py |
Bleualign | Bleualign-master/bleualign/utils.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: University of Zurich
# Author: Rico Sennrich
# For licensing information, see LICENSE
# Evaluation functions for Bleualign
from __future__ import division
from operator import itemgetter
def evaluate(options, testalign, goldalign, log_function):
    """Compare hypothesis alignments against gold alignments.

    Logs gold alignment-type frequencies, total recall, per-type precision and
    total precision, and returns a dict with the raw strict/lax counts under
    the keys 'recall' and 'precision'. *options* is accepted for interface
    compatibility but not used here.
    """
    goldalign = [(tuple(src), tuple(target)) for src, target in goldalign]
    results = {}

    # Frequency of each (source-size, target-size) alignment type in gold.
    paircounts = {}
    for srclist, targetlist in goldalign:
        key = (len(srclist), len(targetlist))
        paircounts[key] = paircounts.get(key, 0) + 1
    total = float(len(goldalign))
    pairs_normalized = {key: (count, count / total) for key, count in paircounts.items()}

    log_function('\ngold alignment frequencies\n')
    by_count = sorted(list(pairs_normalized.items()), key=itemgetter(1), reverse=True)
    for aligntype, (abscount, relcount) in by_count:
        log_function(aligntype, end='')
        log_function(' - ', end='')
        log_function(abscount, end='')
        log_function(' (' + str(relcount) + ')')

    log_function('\ntotal recall: ', end='')
    log_function(str(len(goldalign)) + ' pairs in gold')
    tpstrict, fnstrict, tplax, fnlax = recall((0, 0), goldalign, [entry[0] for entry in testalign], log_function)
    results['recall'] = (tpstrict, fnstrict, tplax, fnlax)

    # Precision broken down per hypothesis alignment type.
    for aligntype in set(entry[1] for entry in testalign):
        testalign_bytype = [entry for entry in testalign if entry[1] == aligntype]
        log_function('precision for alignment type ' + str(aligntype) + ' ( ' + str(len(testalign_bytype)) + ' alignment pairs)')
        precision(goldalign, testalign_bytype, log_function)

    log_function('\ntotal precision:', end='')
    log_function(str(len(testalign)) + ' alignment pairs found')
    tpstrict, fpstrict, tplax, fplax = precision(goldalign, testalign, log_function)
    results['precision'] = (tpstrict, fpstrict, tplax, fplax)
    return results
def precision(goldalign, testalign, log_function):
    """Count strict and lax true/false positives of *testalign* against *goldalign*.

    *testalign* entries are ((src_tuple, target_tuple), aligntype); only the
    pair is used. Returns (tpstrict, fpstrict, tplax, fplax) and logs the
    resulting precision values (or 'nothing to find' when empty).
    """
    tpstrict = tplax = fpstrict = fplax = 0
    for src, target in (entry[0] for entry in testalign):
        if (src, target) == ((), ()):
            continue
        if (src, target) in goldalign:
            tpstrict += 1
            tplax += 1
            continue
        srcset = set(src)
        targetset = set(target)
        # lax condition: hypothesis and gold alignment only need to overlap
        overlaps = False
        for gold_src, gold_target in goldalign:
            if srcset.intersection(set(gold_src)) and targetset.intersection(set(gold_target)):
                overlaps = True
                break
        if overlaps:
            fpstrict += 1
            tplax += 1
        else:
            fpstrict += 1
            fplax += 1
            log_function('false positive: ', 2)
            log_function((src, target), 2)
    if tpstrict + fpstrict > 0:
        log_function('precision strict: ', end='')
        log_function(tpstrict / float(tpstrict + fpstrict))
        log_function('precision lax: ', end='')
        log_function(tplax / float(tplax + fplax))
        log_function('')
    else:
        log_function('nothing to find')
    return tpstrict, fpstrict, tplax, fplax
def recall(aligntype, goldalign, testalign, log_function):
    """Count strict and lax true positives / false negatives for one alignment type.

    ``aligntype == (0, 0)`` evaluates every gold pair; a one-sided zero type
    logs 'nothing to find' and returns None. Otherwise only gold pairs whose
    (len(src), len(target)) matches *aligntype* are counted. Returns
    (tpstrict, fnstrict, tplax, fnlax).
    """
    srclen, targetlen = aligntype
    if srclen == 0 and targetlen == 0:
        gapdists = [(0, 0)] * len(goldalign)
    elif srclen == 0 or targetlen == 0:
        log_function('nothing to find')
        return
    else:
        gapdists = [(len(src), len(target)) for src, target in goldalign]
    tpstrict = tplax = fnstrict = fnlax = 0
    for idx, pair in enumerate(gapdists):
        if pair != aligntype:
            continue
        srclist, targetlist = goldalign[idx]
        if not srclist or not targetlist:
            continue
        if (srclist, targetlist) in testalign:
            tpstrict += 1
            tplax += 1
            continue
        srcset = set(srclist)
        targetset = set(targetlist)
        # lax condition: hypothesis and gold alignment only need to overlap
        found_lax = False
        for src, target in testalign:
            if srcset.intersection(set(src)) and targetset.intersection(set(target)):
                found_lax = True
                break
        if found_lax:
            tplax += 1
            fnstrict += 1
        else:
            fnstrict += 1
            fnlax += 1
            log_function('not found: ', 2)
            log_function(goldalign[idx], 2)
    if tpstrict + fnstrict > 0:
        log_function('recall strict: ')
        log_function(tpstrict / float(tpstrict + fnstrict))
        log_function('recall lax: ')
        log_function(tplax / float(tplax + fnlax))
        log_function('')
    else:
        log_function('nothing to find')
    return tpstrict, fnstrict, tplax, fnlax
def finalevaluation(results, log_function):
    """Aggregate per-document recall/precision counts and log the totals.

    *results* maps document keys to dicts with 4-tuples under 'recall'
    (tp, fn, tplax, fnlax) and 'precision' (tp, fp, tplax, fplax). Logs
    strict/lax recall, precision and F1 over the summed counts; a zero
    denominator yields 0 for the corresponding value.
    """
    recall_totals = [0, 0, 0, 0]
    precision_totals = [0, 0, 0, 0]
    for doc_result in results.values():
        for pos in range(4):
            recall_totals[pos] += doc_result['recall'][pos]
            precision_totals[pos] += doc_result['precision'][pos]

    def _ratio(numerator, denominator):
        # 0 when the denominator is zero, mirroring the original try/except.
        try:
            return numerator / float(denominator)
        except ZeroDivisionError:
            return 0

    pstrict = _ratio(precision_totals[0], precision_totals[0] + precision_totals[1])
    plax = _ratio(precision_totals[2], precision_totals[2] + precision_totals[3])
    rstrict = _ratio(recall_totals[0], recall_totals[0] + recall_totals[1])
    rlax = _ratio(recall_totals[2], recall_totals[2] + recall_totals[3])
    fstrict = 0 if (pstrict + rstrict) == 0 else 2 * (pstrict * rstrict) / (pstrict + rstrict)
    flax = 0 if (plax + rlax) == 0 else 2 * (plax * rlax) / (plax + rlax)

    log_function('\n=========================\n')
    log_function('total results:')
    log_function('recall strict: ', end='')
    log_function(rstrict)
    log_function('recall lax: ', end='')
    log_function(rlax)
    log_function('')
    log_function('precision strict: ', end='')
    log_function(pstrict)
    log_function('precision lax: ')
    log_function(plax)
    log_function('')
    log_function('f1 strict: ', end='')
    log_function(fstrict)
    log_function('f1 lax: ', end='')
    log_function(flax)
    log_function('')
acrobat_submission | acrobat_submission-main/utils.py | import pandas as pd
from PIL import ImageOps
from PIL import ImageFilter
import cv2
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
from torchvision import transforms
from torchvision import models
import torch
from openslide import OpenSlide
from PIL import Image
from lr_utils import WSIDataset
import random
from scipy.interpolate import Rbf
from pathlib import Path
def base_transforms():
    """Default preprocessing pipeline: resize to 256x256, then convert to tensor."""
    pipeline = [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
    ]
    return transforms.Compose(pipeline)
def filtered_patches(wsi, stride, white_thr, black_thr):
    """Tabulate patch-centre coordinates whose thumbnail pixel is neither
    near-white nor near-black.

    A grayscale thumbnail downsampled by *stride* is thresholded; each kept
    pixel maps back to a patch centre at full resolution. Columns: 'dim1'
    (x / level-0 column), 'dim2' (y / level-0 row).
    """
    width, height = wsi.dimensions
    thumbnail = np.array(wsi.get_thumbnail((width // stride, height // stride)).convert('L'))
    keep = (thumbnail < white_thr) & (thumbnail > black_thr)
    rows, cols = np.where(keep)
    half = stride // 2
    frame = pd.DataFrame(columns=['dim1', 'dim2'])
    frame['dim1'] = stride * cols + half
    frame['dim2'] = stride * rows + half
    return frame
def foreground_detection_model(foreground_model_path):
    """Load a ResNet-18 binary classifier (foreground vs background) onto the GPU
    from the state dict at *foreground_model_path*, in eval mode."""
    net = models.resnet18().cuda()
    # Swap the ImageNet head for a 2-class output layer.
    net.fc = torch.nn.Linear(512, 2).cuda()
    state = torch.load(foreground_model_path)
    net.load_state_dict(state)
    net.eval()
    return net
def get_foreground(model, wsi_path, batch_size=512, white_thr=230, black_thr=20, stride=64, downsample=32, workers=8):
    """Classify candidate patches of a WSI and return a downsampled foreground mask.

    Patches passing the intensity filter are batched through *model*; pixels of
    the thumbnail (downsampled by *downsample*) at predicted-foreground patch
    centres are set to 1.
    """
    wsi = OpenSlide(wsi_path)
    df = filtered_patches(wsi, stride, white_thr, black_thr)
    print(f'batches = {1 + (len(df)//batch_size)}')
    dataset = WSIDataset(df, wsi, base_transforms())
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=workers)
    preds = np.zeros(len(dataset))
    with torch.no_grad():
        for i, data in tqdm(enumerate(dataloader)):
            out_ = model(data.cuda())
            # Class index per patch; class 1 is treated as foreground below.
            preds[batch_size*i:batch_size*i+data.shape[0]] = torch.argmax(out_, axis=1).cpu().numpy()
    df['pred'] = preds.astype(int)
    w, h = wsi.dimensions
    tn = wsi.get_thumbnail((w//downsample, h//downsample)).convert('L')
    foreground = np.zeros_like(tn)
    # Keep foreground patches and map their centres into thumbnail coordinates.
    df = df.loc[df['pred']==1].copy()
    df['dim1'] = df['dim1']//downsample
    df['dim2'] = df['dim2']//downsample
    # Row index is dim2 (y), column index is dim1 (x).
    foreground[df.values.T[:2].astype(int)[1], df.values.T[:2].astype(int)[0]] = 1
    return foreground
def bbox_helper(density, bins):
    """Scan a 1-D density profile for the extent of its dominant mass.

    The profile is summed into *bins* chunks; starting from the chunk holding
    the 5th-largest value, the scan walks outward in both directions until a
    chunk falls below 1/20 of that value. Returns (start, end) chunk indices.
    Requires at least 5 chunks.
    """
    chunk = len(density) // bins
    dmap = [np.sum(density[i:i + chunk]) for i in range(0, len(density), chunk)]
    fifth_largest = sorted(dmap)[-5]
    peak = dmap.index(fifth_largest)
    cutoff = fifth_largest // 20
    for pos in range(peak, len(dmap)):
        if dmap[pos] < cutoff:
            break
    end = pos + 1 if pos < len(dmap) - 1 else pos
    # NOTE: when this range is empty (peak == 0), ``pos`` keeps its value from
    # the forward scan above — matching the original's leaked loop variable.
    for pos in range(peak, 0, -1):
        if dmap[pos] < cutoff:
            break
    start = pos - 1 if pos > 1 else pos
    return start, end
def get_bbox_primary(foreground):
    """Crop *foreground* to its dominant mass, first along x, then along y.

    Returns (cropped_array, (t, b, r, l)) where (t, b) bound the rows of the
    x-cropped array and (r, l) bound the columns of the input.
    """
    bins = 100
    x_profile = np.sum(foreground, axis=0)
    x_start, x_end = bbox_helper(x_profile, bins)
    r = (len(x_profile) // bins) * x_start
    l = (len(x_profile) // bins) * x_end
    cropped = foreground[:, r:l]
    y_profile = np.sum(cropped, axis=1)
    y_start, y_end = bbox_helper(y_profile, bins)
    t = (len(y_profile) // bins) * y_start
    b = (len(y_profile) // bins) * y_end
    return cropped[t:b, :], (t, b, r, l)
def get_bbox_secondory(arr):
    """Trim *arr* to the span holding roughly the central 90% of its mass
    along each axis (cumulative-fraction thresholds 0.05 and 0.95)."""
    total = np.sum(arr)
    col_frac = np.cumsum(np.sum(arr, axis=0)) / total
    row_frac = np.cumsum(np.sum(arr, axis=1)) / total
    r = np.where(col_frac > 0.05)[0][0]
    l = np.where(col_frac < 0.95)[-1][-1]
    t = np.where(row_frac > 0.05)[0][0]
    b = np.where(row_frac < 0.95)[-1][-1]
    return arr[t:b, r:l], (t, b, r, l)
def get_bbox(arr):
    """Pick a bounding box for *arr*: the primary crop when it keeps more
    than 80% of the mass, otherwise whichever of primary/secondary keeps more."""
    total = np.sum(arr)
    primary = get_bbox_primary(arr)
    if np.sum(primary[0]) / total > 0.8:
        return primary
    secondary = get_bbox_secondory(arr)
    if np.sum(primary[0]) / total > np.sum(secondary[0]) / total:
        return primary
    return secondary
def he_conv_input(wsi, box, fg, stride):
    """Build the grayscale H&E template used as the registration kernel.

    Dilates the foreground mask *fg*, crops both the mask and the slide
    thumbnail (downsampled by *stride*) to *box* = (top, bottom, right, left),
    inverts and contrast-rescales the crop, and zeroes background pixels.
    NOTE(review): assumes *box* indexes the thumbnail/mask, not level-0
    coordinates — confirm against the caller.
    """
    k = np.ones((7, 7))
    fg = cv2.dilate(fg, k, 1)
    bg = fg==0
    wsi = OpenSlide(wsi)
    w, h = wsi.dimensions
    tn = wsi.get_thumbnail((w//stride, h//stride)).convert('L')
    a, b, c, d = box
    bg = bg[a:b, c:d]
    image = Image.fromarray(np.array(tn)[a:b, c:d])
    # Mask of pixels that stay dark even after histogram equalization.
    mask = np.array(ImageOps.equalize(image, mask=None))<50
    image = ImageOps.invert(image)
    image = np.array(image)
    # Force the (pre-inversion dark) masked pixels to full intensity.
    image[mask] = 255
    # Rescale to [0, 1], then compress into [0.5, 1] and expand to [127.5, 255].
    min_, max_ = np.min(image), np.max(image)
    image = (image-min_) / (max_-min_)
    image = (image + 1) / 2
    image = 255*image
    # Zero out everything outside the (dilated) foreground.
    image[bg] = 0
    image = image.astype('uint8')
    return image
def ihc_conv_input(ihc_path):
    """Build the inverted, blurred grayscale IHC thumbnail (float array)
    used as the convolution input for registration."""
    slide = OpenSlide(ihc_path)
    width, height = slide.dimensions
    gray = np.array(slide.get_thumbnail((width // 32, height // 32)).convert('L'))
    # Push near-black pixels to white before blurring
    # (presumably scanner background — TODO confirm).
    gray[gray < 30] = 255
    blurred = np.array(Image.fromarray(gray).filter(ImageFilter.BLUR))
    inverted = 255.0 - blurred
    return inverted.astype(float)
def pad_he(he_template):
    """Pad the H&E template to an even-sided square, then surround it with a
    zero border a quarter of the side length wide."""
    # Make each dimension even (one extra row on top / column on the right).
    if he_template.shape[0] % 2 == 1:
        he_template = cv2.copyMakeBorder(he_template.astype('uint8'), 1, 0, 0, 0, 0)
    if he_template.shape[1] % 2 == 1:
        he_template = cv2.copyMakeBorder(he_template.astype('uint8'), 0, 0, 0, 1, 0)
    # Make it square by padding the shorter dimension symmetrically.
    if he_template.shape[0] > he_template.shape[1]:
        margin = (he_template.shape[0] - he_template.shape[1]) // 2
        he_template = cv2.copyMakeBorder(he_template.astype('uint8'), 0, 0, margin, margin, 0)
    if he_template.shape[0] < he_template.shape[1]:
        margin = (he_template.shape[1] - he_template.shape[0]) // 2
        he_template = cv2.copyMakeBorder(he_template.astype('uint8'), margin, margin, 0, 0, 0)
    # Final uniform margin: a quarter of the (now square) side.
    margin = max(he_template.shape) // 4
    return cv2.copyMakeBorder(he_template.astype('uint8'), margin, margin, margin, margin, 0)
def pad_ihc(ihc, he_template):
    """Zero-pad *ihc* by half the template's largest side on every edge and
    return it as a CUDA tensor."""
    margin = max(he_template.shape) // 2
    padder = transforms.Pad([margin, margin, margin, margin])
    padded = np.array(padder(Image.fromarray(ihc.astype('uint8'))))
    return torch.Tensor(padded).cuda()
def rotation_matrix(he_template, astride, start, end):
    # Build a stack of rotated copies of the template, one plane per angle in
    # [start, end) stepping by `astride` degrees, as a CUDA tensor usable as
    # conv2d weights.
    c = he_template.shape[1]//2, he_template.shape[0]//2  # (cx, cy) rotation centre
    num_planes = (end-start)//astride
    rot_matrix = torch.zeros((num_planes, he_template.shape[0], he_template.shape[1]), requires_grad=False).cuda()
    he_template = he_template.astype('uint8')
    for plane in range(num_planes):
        theta = start + (plane*astride)
        M = cv2.getRotationMatrix2D(c, theta, 1.0)
        # warpAffine dsize is (width, height); out-of-frame pixels become 0.
        rotated = cv2.warpAffine(he_template, M, (he_template.shape[1], he_template.shape[0]))
        rot_matrix[plane] = torch.Tensor(rotated.astype(float))
    # Side effect: checkpoint the stack to disk (debug/reuse aid).
    torch.save(rot_matrix, 'rot_matrix_new.pt')
    return rot_matrix
def max_conv(ihc, rot_matrix, stride):
    """Exhaustive template matching: convolve every rotated template plane
    over the IHC image and return the location of the best response.

    ihc        : 2-D image tensor (H, W)
    rot_matrix : (num_angles, h, w) stack of rotated templates
    stride     : spatial stride of the convolution
    Returns (row*stride, col*stride, angle_index) of the maximum response
    (first occurrence on ties), or None if rot_matrix is empty.
    """
    best = float('-inf')  # was a magic -9999999
    reg_data = None
    input_ = ihc[(None,)*2]  # -> (1, 1, H, W); loop-invariant, hoisted
    for angle in range(rot_matrix.shape[0]):
        weight = rot_matrix[angle][(None,)*2]  # -> (1, 1, h, w)
        out_ = torch.nn.functional.conv2d(input_, weight, stride=stride)
        cur = float(torch.max(out_))
        # BUG FIX: the original compared int(torch.max(out_)) > max_; the int
        # truncation could reject a genuinely larger fractional maximum
        # (e.g. 5.9 vs a stored 5.5, since int(5.9) == 5 < 5.5).
        if cur > best:
            best = cur
            argmax = torch.where(out_ == torch.max(out_))
            # argmax dims are (batch, channel, row, col); take the first hit.
            reg_data = stride*int(argmax[2][0]), stride*int(argmax[3][0]), angle
    return reg_data
def register(he_template, ihc):
    # Coarse-to-fine rigid registration of the H&E template onto the IHC
    # image. Returns (x_, y_, theta): translation (row, col, in padded-IHC
    # coordinates) and rotation in degrees.
    he_template = pad_he(he_template)
    astride = 10  # coarse angular step (degrees)
    stride = 10   # coarse spatial step (pixels)
    # Coarse pass: 36 rotations x strided convolution over the whole image.
    rot_matrix = rotation_matrix(he_template, astride, 0, 360)
    ihc = pad_ihc(ihc, he_template)
    x_strided, y_strided, angle_strided = max_conv(ihc, rot_matrix, stride)
    angle_strided = astride*angle_strided  # plane index -> degrees
    # Fine pass: 1-degree / 1-pixel search in a +/-10 window around the hit.
    astride = 1
    stride = 1
    rot_matrix = rotation_matrix(he_template, astride, angle_strided-10, angle_strided+10)
    pad = max(he_template.shape) // 2
    # Crop the IHC to a 100-px window around the coarse location, extended by
    # the template's half-extent on each side.
    ihc = ihc[x_strided-50:x_strided+50+(2*pad), y_strided-50:y_strided+50+(2*pad)]
    x_, y_, angle_ = max_conv(ihc, rot_matrix, stride)
    # Map the fine results back to the uncropped coordinate frame / absolute
    # degrees (the fine angle index 0 corresponds to angle_strided-10).
    x_ = x_strided + x_ - 50
    y_ = y_strided + y_ - 50
    theta = angle_strided+angle_-10
    return x_, y_, theta
def local_correction_samples(foreground, bbox, reg_data):
    """Sample control points for local (non-rigid) correction and map each
    H&E point into IHC coordinates using the global registration result.

    foreground : binary tissue mask at thumbnail scale
    bbox       : (row_min, row_max, col_min, col_max) of the template crop
    reg_data   : (x, y, theta) from register()
    Returns a dataframe with columns he_x, he_y, ihc_x, ihc_y, type, theta;
    type is 1 for mask-boundary points, 0 for interior points.
    """
    fg = foreground
    k = np.ones((7, 7))
    # BUG FIX: the third positional argument of cv2.dilate/cv2.erode is
    # `dst`, not `iterations`; pass the intended counts by keyword.
    fg_dilate = cv2.dilate(fg, k, iterations=1)
    fg_erode = cv2.erode(fg_dilate, k, iterations=2)
    diff = (fg_dilate - fg_erode) > 0  # thin boundary band of the mask
    h, w = diff.shape
    sampled_points = []
    # One random sample per 32x32 tile: prefer a boundary pixel (type 1),
    # otherwise any foreground pixel (type 0).
    for i in range(0, w-32, 32):
        for j in range(0, h-32, 32):
            canvas = np.zeros_like(diff)
            # cv2.rectangle corners are (x, y); thickness -1 fills the tile.
            rect = cv2.rectangle(canvas.astype('uint8'), (i, j), (i+32, j+32), 255, -1) > 0
            rows, cols = np.where(np.logical_and(rect, diff))
            point_type = 1
            if len(rows) == 0:
                rows, cols = np.where(np.logical_and(rect, fg))
                point_type = 0
            if len(rows) > 0:
                sample = random.randint(0, len(rows)-1)
                sampled_points.append([rows[sample], cols[sample], point_type])
    print(f"Number of sampled points = {len(sampled_points)}")
    # Scale thumbnail coordinates up to level-0; the type flag gets scaled
    # too and is divided back out below.
    sampled_points = 32 * np.array(sampled_points)
    df = pd.DataFrame(columns=['hx', 'hy'])
    df['he_y'] = sampled_points.T[0]
    df['he_x'] = sampled_points.T[1]
    df['type'] = sampled_points.T[2]//32
    # Global-registration mapping: translation of the bbox centre relative to
    # the registered location, and the inverse rotation about that location.
    x_map = ((int(bbox[2]) + int(bbox[3])) // 2) - int(reg_data[1])
    y_map = ((int(bbox[0]) + int(bbox[1])) // 2) - int(reg_data[0])
    angle_map = -int(reg_data[2])
    cx_ = 32*int(reg_data[1])
    cy_ = 32*int(reg_data[0])
    df['ihc_x'] = df['he_x'] - (32*x_map)
    df['ihc_y'] = df['he_y'] - (32*y_map)
    # Rotate every point once (the original applied get_rotation twice per
    # row, recomputing the same rotation for x and y).
    rotated = [get_rotation(x, y, angle_map, cx_, cy_)
               for x, y in zip(df['ihc_x'], df['ihc_y'])]
    df['ihc_x'] = [int(r[0]) for r in rotated]
    df['ihc_y'] = [int(r[1]) for r in rotated]
    df = df[['he_x', 'he_y', 'ihc_x', 'ihc_y', 'type']]
    df['theta'] = reg_data[2]
    return df
def get_rotation(x, y, angle, cx, cy):
    """Rotate the point (x, y) by `angle` degrees about the pivot (cx, cy)
    using OpenCV's 2-D rotation matrix; returns a float array [x', y']."""
    x, y, angle, cx, cy = int(x), int(y), int(angle), int(cx), int(cy)
    rot = cv2.getRotationMatrix2D((0, 0), angle, 1.0)[:, :2]
    shifted = np.array([x - cx, y - cy])
    return shifted @ rot + np.array([cx, cy])
def he_patch(he_path, idx, hx, hy):
    """Read a 4096x4096 level-0 H&E patch centred on (hx[idx], hy[idx]),
    downscale 16x to 256x256 grayscale, invert it with background pushed to
    zero, and return two (1, 1, 256, 256) tensors: a zero-background and a
    -1-background variant."""
    slide = OpenSlide(he_path)
    ps = 2048*2
    offset = ps // 2
    rs = ps // 16
    cx, cy = hx[idx], hy[idx]
    patch = slide.read_region((cx-offset, cy-offset), 0, (ps, ps)).convert('L').resize((rs, rs))
    arr = np.array(patch)
    arr[arr < 30] = 255   # near-black scanner border -> background
    arr[arr > 230] = 255  # near-white glass -> background
    arr = 255 - arr       # tissue bright, background zero
    tensor = transforms.ToTensor()(Image.fromarray(arr))
    zeros_bg = tensor.clone()
    neg_bg = tensor.clone()
    neg_bg[neg_bg == 0] = -1
    zeros_bg[zeros_bg == 0] = 0
    return zeros_bg[(None,)], neg_bg[(None,)]
def ihc_patch(ihc_path, idx, ix, iy, theta):
    """Read a 10240x10240 level-0 IHC patch centred on (ix[idx], iy[idx]),
    downscale 16x to 640x640 grayscale, counter-rotate by the global
    registration angle, invert with background pushed to zero, and return
    the zero-background and -1-background (1, 1, 640, 640) variants."""
    slide = OpenSlide(ihc_path)
    ps = 2048*5
    offset = ps // 2
    rs = ps // 16
    cx, cy = ix[idx], iy[idx]
    patch = slide.read_region((cx-offset, cy-offset), 0, (ps, ps)).convert('L').resize((rs, rs)).rotate(-theta)
    arr = np.array(patch)
    arr[arr < 30] = 255
    arr[arr > 230] = 255
    arr = 255 - arr
    tensor = transforms.ToTensor()(Image.fromarray(arr))
    zeros_bg = tensor.clone()
    neg_bg = tensor.clone()
    neg_bg[neg_bg == 0] = -1
    zeros_bg[zeros_bg == 0] = 0
    return zeros_bg[(None,)], neg_bg[(None,)]
def get_local_corrections(he_path, ihc_path, df):
    # For every sampled control point, cross-correlate an H&E patch against a
    # larger IHC patch in two encodings (zero-background "_0" and
    # -1-background "_1") and record the peak offset plus the peak's spread.
    # Column layout after the inserts below (written positionally via iloc),
    # assuming df comes from local_correction_samples:
    # 0 he_x, 1 he_y, 2 ihc_x, 3 ihc_y, 4 type, 5 theta,
    # 6 xc_0, 7 yc_0, 8 xc_1, 9 yc_1, 10 h_0, 11 w_0, 12 h_1, 13 w_1,
    # 14 area_0, 15 area_1
    df['xc_0'] = None
    df['yc_0'] = None
    df['xc_1'] = None
    df['yc_1'] = None
    df['h_0'] = None
    df['w_0'] = None
    df['h_1'] = None
    df['w_1'] = None
    df['area_0'] = None
    df['area_1'] = None
    hx, hy = df.values.T[0], df.values.T[1]
    ix, iy = df.values.T[2], df.values.T[3]
    theta = df['theta'][0]  # assumes a default RangeIndex (label 0 exists)
    for idx in tqdm(range(len(df))):
        ip_0, ip_1 = ihc_patch(ihc_path, idx, ix, iy, theta)
        hp_0, hp_1 = he_patch(he_path, idx, hx, hy)
        out_0 = torch.nn.functional.conv2d(ip_0.cuda(), hp_0.cuda())
        out_1 = torch.nn.functional.conv2d(ip_1.cuda(), hp_1.cuda())
        np1 = out_1[0, 0].cpu().numpy()
        np0 = out_0[0, 0].cpu().numpy()
        max_0 = torch.where(out_0[0, 0]==torch.max(out_0))
        max_1 = torch.where(out_1[0, 0]==torch.max(out_1))
        # 192 is the centre of the (640-256+1)=385-wide response map; the
        # peak's offset from centre is the local correction.
        xc_0, yc_0 = int(max_0[0][0])-192, int(max_0[1][0])-192
        xc_1, yc_1 = int(max_1[0][0])-192, int(max_1[1][0])-192
        # Height/width/area of the near-peak region (>= 95% of the max):
        # a proxy for how sharp (trustworthy) the correlation peak is.
        bin_array = np0>(np.max(np0)*0.95)
        h_0 = np.max(np.where(bin_array)[0]) - np.min(np.where(bin_array)[0])
        w_0 = np.max(np.where(bin_array)[1]) - np.min(np.where(bin_array)[1])
        num_pixels_0 = np.sum(bin_array)
        # With the +/-1 encoding the maximum may be negative; flip the 95%
        # factor so the threshold stays below the maximum either way.
        m = 1.05 if np.max(np1)<0 else 0.95
        bin_array = np1>(np.max(np1)*m)
        h_1 = np.max(np.where(bin_array)[0]) - np.min(np.where(bin_array)[0])
        w_1 = np.max(np.where(bin_array)[1]) - np.min(np.where(bin_array)[1])
        num_pixels_1 = np.sum(bin_array)
        df.iloc[idx, 6] = xc_0
        df.iloc[idx, 7] = yc_0
        df.iloc[idx, 8] = xc_1
        df.iloc[idx, 9] = yc_1
        df.iloc[idx, 10] = h_0
        df.iloc[idx, 11] = w_0
        df.iloc[idx, 12] = h_1
        df.iloc[idx, 13] = w_1
        df.iloc[idx, 14] = num_pixels_0
        df.iloc[idx, 15] = num_pixels_1
    return df
def remove_smooth(df):
    """Keep only control points whose correlation peak is both compact
    (area < 500) and plausibly close to the patch centre (|offset| < 160),
    in at least one of the two encodings; pick the correction from the
    -1-background encoding when its checks pass, else the zero-background
    one. Returns he_x, he_y (back at thumbnail scale) plus xc, yc."""
    df.drop_duplicates(inplace=True)
    idx_thr = 160
    df['box_0'] = df['area_0'] < 500
    df['box_1'] = df['area_1'] < 500
    df['idx_0'] = (abs(df['xc_0']) < idx_thr) & (abs(df['yc_0']) < idx_thr)
    df['idx_1'] = (abs(df['xc_1']) < idx_thr) & (abs(df['yc_1']) < idx_thr)
    df['check_0'] = df['box_0'] & df['idx_0']
    df['check_1'] = df['box_1'] & df['idx_1']
    df = df.loc[df['check_0'] | df['check_1']].copy()
    chooser = lambda row, c0, c1: select_xc(row[c0], row[c1], row['check_0'], row['check_1'])
    df['xc'] = df.apply(chooser, axis=1, args=('xc_0', 'xc_1'))
    df['yc'] = df.apply(chooser, axis=1, args=('yc_0', 'yc_1'))
    df = df.reset_index(drop=True)[['he_x', 'he_y', 'xc', 'yc']]
    df['he_x'] //= 32
    df['he_y'] //= 32
    return df
def remove_outliers(df):
    """Iteratively drop control points whose correction disagrees with their
    neighbours.

    For every ordered pair of points the "slope" |delta correction| /
    |delta position| is computed; a point flagged (slope > 0.3) against more
    than one other point is an outlier candidate, and the one flagged most
    often is removed. Repeats until no candidate remains.

    Expects columns he_x, he_y, xc, yc; adds a 'correction' magnitude column
    as a side effect (as the original did) and returns the filtered frame.
    NOTE(review): two points with identical (he_x, he_y) give dist_diff == 0
    and a divide-by-zero -- confirm upstream guarantees distinct positions.
    """
    # Removed from the original: unused hx/hy/xc/yc extractions (computed
    # twice, never read) and the dead `potential_outliers = [100]` seed.
    while True:
        df['correction'] = np.sqrt(df['xc']**2 + df['yc']**2)
        mat = np.zeros((len(df), len(df)))
        for i in range(len(df)):
            ref = df.iloc[i]
            for j in range(len(df)):
                if i == j:
                    continue
                compare = df.iloc[j]
                val_diff = np.sqrt((ref['xc']-compare['xc'])**2 + (ref['yc']-compare['yc'])**2)
                dist_diff = np.sqrt((ref['he_x']-compare['he_x'])**2 + (ref['he_y']-compare['he_y'])**2)
                mat[i, j] = val_diff / dist_diff
        # Row indices flagged against at least one other point, with counts.
        counts = np.unique(np.where(mat > 0.3)[0], return_counts=True)
        flagged = counts[1] > 1
        if sum(flagged) > 0:
            indices = counts[0][flagged]
            cnts = counts[1][flagged]
            outlier = indices[cnts == max(cnts)][0]  # most-flagged point
            df = df[~(df.index == outlier)]
            df = df.reset_index(drop=True)
        else:
            break
    return df
def select_xc(xc_0, xc_1, check_0, check_1):
    """Prefer the -1-background encoding's correction when its quality check
    passed; otherwise fall back to the zero-background one."""
    return xc_1 if check_1 else xc_0
def interpolate(foreground, df, wsi_path):
    """Densify the sparse (xc, yc) corrections over the whole thumbnail grid
    with a linear RBF and save the two fields as '<stem>_x.npy'/'<stem>_y.npy'.

    foreground : 2-D array defining the output grid shape (and dtype)
    df         : frame whose first four columns are he_x, he_y, xc, yc
    wsi_path   : slide path; its stem names the output files
    Returns None (files on disk are the result).
    """
    hx = df.values.T[0].astype(int)
    hy = df.values.T[1].astype(int)
    xc = df.values.T[2].astype(int)
    yc = df.values.T[3].astype(int)
    # Removed from the original: x_corrections/y_corrections scatter arrays
    # that were filled but never read (dead code).
    # NOTE(review): zeros_like inherits foreground's dtype; if that is uint8
    # the interpolated values get truncated -- confirm foreground is float.
    x_interpolated = np.zeros_like(foreground)
    y_interpolated = np.zeros_like(foreground)
    rbfi_x = Rbf(hx, hy, xc, function='linear')
    rbfi_y = Rbf(hx, hy, yc, function='linear')
    cols = np.arange(x_interpolated.shape[1])  # hoisted; was rebuilt per row
    for i in range(x_interpolated.shape[0]):
        rows = np.full_like(cols, i)
        x_interpolated[i] = rbfi_x(cols, rows)
        y_interpolated[i] = rbfi_y(cols, rows)
    np.save(f'{Path(wsi_path).stem}_x.npy', x_interpolated)
    np.save(f'{Path(wsi_path).stem}_y.npy', y_interpolated)
| 17,079 | 35.340426 | 142 | py |
acrobat_submission | acrobat_submission-main/lr_utils.py | from torch.utils.data import Dataset
class WSIDataset(Dataset):
    """Map-style dataset yielding one transformed square patch per row of
    `df`; columns 0 and 1 of `df` hold level-0 centre coordinates."""

    def __init__(self, df, wsi, transform, level=0, ps=256):
        self.wsi = wsi              # an open slide exposing read_region()
        self.transform = transform  # applied to the RGB PIL patch
        self.df = df
        self.level = level
        self.ps = ps                # square patch side in pixels

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        half = self.ps // 2
        # Centre the patch on the stored point, clamping at the slide origin.
        left = max(self.df.iloc[idx, 0] - half, 0)
        top = max(self.df.iloc[idx, 1] - half, 0)
        region = self.wsi.read_region((left, top), self.level, (self.ps, self.ps))
        return self.transform(region.convert('RGB'))
| 629 | 27.636364 | 76 | py |
CX_GAN | CX_GAN-master/Cascaded Model/BRATS Data/implementation/Save BRATS data to Numpy Files.py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 09:08:06 2020
@author: ZeeshanNisar
"""
from keras.preprocessing.image import load_img, img_to_array
from tqdm import tqdm as tqdm
import os
import numpy as np
# --- configuration --------------------------------------------------------
img_rows = 256
img_cols = 256
channels = 1

os.chdir('/content/drive/My Drive/GitHub Repositories')
baseDir = './Research Paper Contribution/Cascaded Model/BRATS'
trainDir = os.path.join(baseDir, 'dataset', 'train')
validDir = os.path.join(baseDir, 'dataset', 'test')  # NOTE: unused below


def _load_grayscale_dir(folder_path):
    """Load every image in `folder_path` as grayscale, rescaled to [-1, 1]."""
    return [img_to_array(load_img(os.path.join(folder_path, name),
                                  color_mode='grayscale')) / 127.5 - 1
            for name in tqdm(os.listdir(folder_path))]


# Convert the 'infected' and 'normal' training folders to numpy archives.
# (The original duplicated the loading loop for each folder.)
imgs_A = []
imgs_B = []
for folder in os.listdir(trainDir):
    if folder == 'infected':
        imgs_A = _load_grayscale_dir(os.path.join(trainDir, folder))
    if folder == 'normal':
        imgs_B = _load_grayscale_dir(os.path.join(trainDir, folder))

imgs_A = np.asarray(imgs_A).reshape(-1, img_rows, img_cols, channels)
np.save(os.path.join(trainDir, 'infected_images'), imgs_A)
imgs_B = np.asarray(imgs_B).reshape(-1, img_rows, img_cols, channels)
np.save(os.path.join(trainDir, 'normal_images'), imgs_B)
| 1,286 | 31.175 | 86 | py |
FFPerceptron | FFPerceptron-main/FFperceptron_MNIST.py | import torch
import time
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize, Lambda
from torch.utils.data import DataLoader
def one_hot_encode(img0, lab):
img = img0.clone()
img[:, :10] = img0.min()
img[range(img0.shape[0]), lab] = img0.max()
return img
#Load MNIST Data
# Single-batch loaders: each split is fetched as one big tensor below.
# Images are normalised with the standard MNIST mean/std and flattened to 784.
train_loader = DataLoader(
    MNIST('./MNIST_data/', train=True,
          download=True,
          transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)), Lambda(lambda x: torch.flatten(x))])),
    batch_size=60000)
test_loader = DataLoader(
    MNIST('./MNIST_data/', train=False,
          download=True,
          transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)), Lambda(lambda x: torch.flatten(x))])),
    batch_size=10000)
dtype = torch.float  # NOTE: defined but not referenced below
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Using device:', device)
# Training images
img0, lab = next(iter(train_loader))
img0 = img0.to(device)
# Validation images
img0_tst, lab_tst = next(iter(test_loader))
img0_tst = img0_tst.to(device)
# Forward Forward Applied to a Single Perceptron for MNIST Classification
n_input, n_out = 784, 125
batch_size, learning_rate = 10, 0.0003
g_threshold = 10  # goodness threshold in the Forward-Forward loss
epochs = 250
perceptron = torch.nn.Sequential(torch.nn.Linear(n_input, n_out, bias = True),
                                 torch.nn.ReLU())
perceptron.to(device)
optimizer = torch.optim.Adam(perceptron.parameters(), lr = learning_rate)
N_trn = img0.size(0) #Use all training images (60000)
tic = time.time()
for epoch in range(epochs):
    img = img0.clone()
    for i in range(N_trn): # Random jittering of training images up to 2 pixels
        dx, dy = torch.randint(-2, 2, (2,))
        img[i] = torch.roll(img0[i].reshape(28, 28), shifts=(dx, dy), dims=(0, 1)).flatten()
    perm = torch.randperm(N_trn)
    img_pos = one_hot_encode(img[perm], lab[perm]) # Good data (actual label)
    # Negative labels: shift each true label by a random non-zero offset mod 10.
    lab_neg = lab[perm] + torch.randint(low=1,high=10,size=(lab.size()))
    lab_neg = torch.where(lab_neg > 9, lab_neg - 10, lab_neg)
    img_neg = one_hot_encode(img[perm], lab_neg) # Bad data (random error in label)
    L_tot = 0
    for i in range(0, N_trn, batch_size):
        perceptron.zero_grad()
        # Goodness and loss for good data in batch
        img_pos_batch = img_pos[i:i+batch_size]
        g_pos = (perceptron(img_pos_batch)**2).mean(dim=1)
        loss = torch.log(1 + torch.exp(-(g_pos - g_threshold))).sum()
        # Goodness and loss for bad data in batch
        img_neg_batch = img_neg[i:i+batch_size]
        g_neg = (perceptron(img_neg_batch)**2).mean(dim=1)
        loss += torch.log(1 + torch.exp(g_neg - g_threshold)).sum()
        L_tot += loss.item() # Accumulate total loss for epoch
        loss.backward() # Compute gradients
        optimizer.step() # Update parameters
    # Test model with validation set
    N_tst = img0_tst.size(0) # Use all test images (10000)
    #Evaluate goodness for all test images and labels 0...9
    g_tst = torch.zeros(10,N_tst).to(device)
    for n in range(10):
        img_tst = one_hot_encode(img0_tst, n)
        g_tst[n] = ((perceptron(img_tst[0:N_tst])**2).mean(dim=1)).detach()
    # Predict the label whose encoding yields the highest goodness.
    predicted_label = g_tst.argmax(dim=0).cpu()
    # Count number of correctly classified images in validation set
    Ncorrect = (predicted_label == lab_tst).sum().cpu().numpy()
    print("Epoch ", epoch+1, ":\tLoss ", L_tot, " \tTime ", round(time.time() - tic), "s\tTest Error ", 100 - Ncorrect/N_tst*100, "%")
hyperband | hyperband-master/main.py | #!/usr/bin/env python
"a more polished example of using hyperband"
"includes displaying best results and saving to a file"
import sys
import cPickle as pickle
from pprint import pprint
from hyperband import Hyperband
#from defs.gb import get_params, try_params
#from defs.rf import get_params, try_params
#from defs.xt import get_params, try_params
#from defs.rf_xt import get_params, try_params
#from defs.sgd import get_params, try_params
#from defs.keras_mlp import get_params, try_params
#from defs.polylearn_fm import get_params, try_params
#from defs.polylearn_pn import get_params, try_params
#from defs.xgb import get_params, try_params
from defs.meta import get_params, try_params
# Output file name comes from argv[1]; default to results.pkl.
# (Python 2 source: print statements, cPickle.)
try:
    output_file = sys.argv[1]
    if not output_file.endswith( '.pkl' ):
        output_file += '.pkl'
except IndexError:
    output_file = 'results.pkl'
print "Will save results to", output_file
#
# Run hyperband with the chosen get_params/try_params pair (see imports).
hb = Hyperband( get_params, try_params )
results = hb.run( skip_last = 1 )
# Show the five best configurations by loss.
print "{} total, best:\n".format( len( results ))
for r in sorted( results, key = lambda x: x['loss'] )[:5]:
    print "loss: {:.2%} | {} seconds | {:.1f} iterations | run {} ".format(
        r['loss'], r['seconds'], r['iterations'], r['counter'] )
    pprint( r['params'] )
    print
print "saving..."
with open( output_file, 'wb' ) as f:
    pickle.dump( results, f )
hyperband | hyperband-master/defs/xgb.py | "function (and parameter space) definitions for hyperband"
"binary classification with XGBoost"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data import data
from xgboost import XGBClassifier as XGB
#
# One hyperband "iteration" corresponds to this many boosting rounds.
trees_per_iteration = 5

# Search space: each hyperparameter is either 'default' (dropped in
# get_params so XGBoost's built-in default applies) or a sampled value.
space = {
    'learning_rate': hp.choice( 'lr', [
        'default',
        hp.uniform( 'lr_', 0.01, 0.2 )
    ]),
    'max_depth': hp.choice( 'md', [
        'default',
        hp.quniform( 'md_', 2, 10, 1 )
    ]),
    'min_child_weight': hp.choice( 'mcw', [
        'default',
        hp.quniform( 'mcw_', 1, 10, 1 )
    ]),
    'subsample': hp.choice( 'ss', [
        'default',
        hp.uniform( 'ss_', 0.5, 1.0 )
    ]),
    'colsample_bytree': hp.choice( 'cbt', [
        'default',
        hp.uniform( 'cbt_', 0.5, 1.0 )
    ]),
    'colsample_bylevel': hp.choice( 'cbl', [
        'default',
        hp.uniform( 'cbl_', 0.5, 1.0 )
    ]),
    'gamma': hp.choice( 'g', [
        'default',
        hp.uniform( 'g_', 0, 1 )
    ]),
    'reg_alpha': hp.choice( 'ra', [
        'default',
        hp.loguniform( 'ra_', log( 1e-10 ), log( 1 ))
    ]),
    'reg_lambda': hp.choice( 'rl', [
        'default',
        hp.uniform( 'rl_', 0.1, 10 )
    ]),
    'base_score': hp.choice( 'bs', [
        'default',
        hp.uniform( 'bs_', 0.1, 0.9 )
    ]),
    'scale_pos_weight': hp.choice( 'spw', [
        'default',
        # BUG FIX: the inner label was 'spw', duplicating the hp.choice
        # label above (hyperopt labels must be unique); renamed to 'spw_'
        # to match every other entry's convention.
        hp.uniform( 'spw_', 0.1, 10 )
    ])
}
def get_params():
    # Sample one configuration and drop entries left at 'default' so
    # XGBoost's own defaults apply for those hyperparameters.
    params = sample( space )
    # BUG FIX: was `v is not 'default'` -- identity comparison against a
    # str literal relies on interning (SyntaxWarning on CPython >= 3.8);
    # use value equality instead.
    params = { k: v for k, v in params.items() if v != 'default' }
    return handle_integers( params )
#
def try_params( n_iterations, params, get_predictions = False ):
    # Budget -> number of boosting rounds; train and evaluate a binary
    # XGBoost classifier. NOTE: get_predictions is accepted for interface
    # parity with other defs but is not used in this body.
    n_estimators = int( round( n_iterations * trees_per_iteration ))
    print "n_estimators:", n_estimators
    pprint( params )
    clf = XGB( n_estimators = n_estimators, nthread = -1, **params )
    return train_and_eval_sklearn_classifier( clf, data )
| 1,710 | 20.123457 | 67 | py |
hyperband | hyperband-master/defs/keras_mlp.py | "function (and parameter space) definitions for hyperband"
"binary classification with Keras (multilayer perceptron)"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data import data
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.layers.normalization import BatchNormalization as BatchNorm
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import *
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler, MaxAbsScaler
#
# TODO: advanced activations - 'leakyrelu', 'prelu', 'elu', 'thresholdedrelu', 'srelu'
max_layers = 5

# Search space for the MLP architecture and training configuration.
space = {
    'scaler': hp.choice( 's',
        ( None, 'StandardScaler', 'RobustScaler', 'MinMaxScaler', 'MaxAbsScaler' )),
    'n_layers': hp.quniform( 'l', 1, max_layers, 1 ),
    #'layer_size': hp.quniform( 'ls', 5, 100, 1 ),
    #'activation': hp.choice( 'a', ( 'relu', 'sigmoid', 'tanh' )),
    'init': hp.choice( 'i', ( 'uniform', 'normal', 'glorot_uniform',
        'glorot_normal', 'he_uniform', 'he_normal' )),
    'batch_size': hp.choice( 'bs', ( 16, 32, 64, 128, 256 )),
    'optimizer': hp.choice( 'o', ( 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax' ))
}

# for each hidden layer, we choose size, activation and extras individually
# (all max_layers layers are parameterised; only the first n_layers are used)
for i in range( 1, max_layers + 1 ):
    space[ 'layer_{}_size'.format( i )] = hp.quniform( 'ls{}'.format( i ), 2, 200, 1 )
    space[ 'layer_{}_activation'.format( i )] = hp.choice( 'a{}'.format( i ),
        ( 'relu', 'sigmoid', 'tanh' ))
    space[ 'layer_{}_extras'.format( i )] = hp.choice( 'e{}'.format( i ), (
        { 'name': 'dropout', 'rate': hp.uniform( 'd{}'.format( i ), 0.1, 0.5 )},
        { 'name': 'batchnorm' },
        { 'name': None } ))
def get_params():
    """Draw one random configuration from `space`, coercing integer-valued
    floats (e.g. quniform layer sizes) back to ints."""
    return handle_integers( sample( space ) )
#
# print hidden layers config in readable way
def print_layers( params ):
    # One line per configured hidden layer; the trailing commas are the
    # Python 2 idiom for suppressing the newline until the final bare print.
    for i in range( 1, params['n_layers'] + 1 ):
        print "layer {} | size: {:>3} | activation: {:<7} | extras: {}".format( i,
            params['layer_{}_size'.format( i )],
            params['layer_{}_activation'.format( i )],
            params['layer_{}_extras'.format( i )]['name'] ),
        if params['layer_{}_extras'.format( i )]['name'] == 'dropout':
            print "- rate: {:.1%}".format( params['layer_{}_extras'.format( i )]['rate'] ),
        print
def print_params( params ):
    # Pretty-print the non-layer hyperparameters, then the per-layer config.
    pprint({ k: v for k, v in params.items() if not k.startswith( 'layer_' )})
    print_layers( params )
    print
def try_params( n_iterations, params ):
    # Build, train and evaluate a binary-classification MLP for the given
    # budget (n_iterations -> epochs). Returns hyperband's result dict.
    print "iterations:", n_iterations
    print_params( params )
    y_train = data['y_train']
    y_test = data['y_test']
    # Optional feature scaling, chosen as a hyperparameter (class name str).
    if params['scaler']:
        scaler = eval( "{}()".format( params['scaler'] ))
        x_train_ = scaler.fit_transform( data['x_train'].astype( float ))
        x_test_ = scaler.transform( data['x_test'].astype( float ))
    else:
        x_train_ = data['x_train']
        x_test_ = data['x_test']
    input_dim = x_train_.shape[1]
    model = Sequential()
    model.add( Dense( params['layer_1_size'], init = params['init'],
        activation = params['layer_1_activation'], input_dim = input_dim ))
    # Remaining hidden layers, each optionally preceded by dropout/batchnorm.
    for i in range( int( params['n_layers'] ) - 1 ):
        extras = 'layer_{}_extras'.format( i + 1 )
        if params[extras]['name'] == 'dropout':
            model.add( Dropout( params[extras]['rate'] ))
        elif params[extras]['name'] == 'batchnorm':
            model.add( BatchNorm())
        model.add( Dense( params['layer_{}_size'.format( i + 2 )], init = params['init'],
            activation = params['layer_{}_activation'.format( i + 2 )]))
    model.add( Dense( 1, init = params['init'], activation = 'sigmoid' ))
    model.compile( optimizer = params['optimizer'], loss = 'binary_crossentropy' )
    #print model.summary()
    #
    # Early stopping on validation loss caps the effective epoch count.
    validation_data = ( x_test_, y_test )
    early_stopping = EarlyStopping( monitor = 'val_loss', patience = 5, verbose = 0 )
    history = model.fit( x_train_, y_train,
        nb_epoch = int( round( n_iterations )),
        batch_size = params['batch_size'],
        shuffle = False,
        validation_data = validation_data,
        callbacks = [ early_stopping ])
    #
    p = model.predict_proba( x_train_, batch_size = params['batch_size'] )
    ll = log_loss( y_train, p )
    auc = AUC( y_train, p )
    acc = accuracy( y_train, np.round( p ))
    print "\n# training | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}".format( ll, auc, acc )
    #
    p = model.predict_proba( x_test_, batch_size = params['batch_size'] )
    ll = log_loss( y_test, p )
    auc = AUC( y_test, p )
    acc = accuracy( y_test, np.round( p ))
    print "# testing | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}".format( ll, auc, acc )
    # 'loss' (test log loss) is what hyperband minimises; early_stop lets it
    # skip configurations that already converged.
    return { 'loss': ll, 'log_loss': ll, 'auc': auc, 'early_stop': model.stop_training }
| 4,624 | 30.678082 | 94 | py |
hyperband | hyperband-master/defs/meta.py | # meta classifier
from common_defs import *
models = ( 'xgb', 'gb', 'rf', 'xt', 'sgd', 'polylearn_fm', 'polylearn_pn', 'keras_mlp' )

# import all the functions: alias each model module's get_params/try_params
# as get_params_<name>/try_params_<name> for string dispatch later.
for m in models:
    # BUG FIX: format the template BEFORE exec'ing it. The original wrote
    #   exec( "..." ).format( m, m )
    # which hands exec the raw template containing literal '{}' placeholders
    # (a SyntaxError) and then calls .format() on exec's return value.
    # defs_regression/meta.py already uses the correct form.
    exec( "from defs.{} import get_params as get_params_{}".format( m, m ))
    exec( "from defs.{} import try_params as try_params_{}".format( m, m ))

space = { 'model': hp.choice( 'model', models ) }
def get_params():
    # Pick a model at random, then merge in that model's own sampled
    # hyperparameters (dispatched via the aliased get_params_<model>).
    params = sample( space )
    m = params['model']
    m_params = eval( "get_params_{}()".format( m ))
    params.update( m_params )
    return params
def try_params( n_iterations, params ):
    # Dispatch to try_params_<model>; copy params so popping 'model' does
    # not mutate the caller's dict.
    params_ = dict( params )
    m = params_.pop( 'model' )
    print m
    return eval( "try_params_{}( n_iterations, params_ )".format( m ))
| 715 | 25.518519 | 88 | py |
hyperband | hyperband-master/defs_regression/xgb.py | "function (and parameter space) definitions for hyperband"
"regression with XGBoost"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data_for_regression import data
from xgboost import XGBRegressor as XGB
#
# One hyperband "iteration" corresponds to this many boosting rounds.
trees_per_iteration = 5

# Search space: each hyperparameter is either 'default' (dropped in
# get_params so XGBoost's built-in default applies) or a sampled value.
space = {
    'learning_rate': hp.choice( 'lr', [
        'default',
        hp.uniform( 'lr_', 0.01, 0.2 )
    ]),
    'max_depth': hp.choice( 'md', [
        'default',
        hp.quniform( 'md_', 2, 10, 1 )
    ]),
    'min_child_weight': hp.choice( 'mcw', [
        'default',
        hp.quniform( 'mcw_', 1, 10, 1 )
    ]),
    'subsample': hp.choice( 'ss', [
        'default',
        hp.uniform( 'ss_', 0.5, 1.0 )
    ]),
    'colsample_bytree': hp.choice( 'cbt', [
        'default',
        hp.uniform( 'cbt_', 0.5, 1.0 )
    ]),
    'colsample_bylevel': hp.choice( 'cbl', [
        'default',
        hp.uniform( 'cbl_', 0.5, 1.0 )
    ]),
    'gamma': hp.choice( 'g', [
        'default',
        hp.uniform( 'g_', 0, 1 )
    ]),
    'reg_alpha': hp.choice( 'ra', [
        'default',
        hp.loguniform( 'ra_', log( 1e-10 ), log( 1 ))
    ]),
    'reg_lambda': hp.choice( 'rl', [
        'default',
        hp.uniform( 'rl_', 0.1, 10 )
    ]),
    'base_score': hp.choice( 'bs', [
        'default',
        hp.uniform( 'bs_', 0.1, 0.9 )
    ]),
    'scale_pos_weight': hp.choice( 'spw', [
        'default',
        # BUG FIX: the inner label was 'spw', duplicating the hp.choice
        # label above (hyperopt labels must be unique); renamed to 'spw_'
        # to match every other entry's convention.
        hp.uniform( 'spw_', 0.1, 10 )
    ])
}
def get_params():
    # Sample one configuration and drop entries left at 'default' so
    # XGBoost's own defaults apply for those hyperparameters.
    params = sample( space )
    # BUG FIX: was `v is not 'default'` -- identity comparison against a
    # str literal relies on interning (SyntaxWarning on CPython >= 3.8);
    # use value equality instead.
    params = { k: v for k, v in params.items() if v != 'default' }
    return handle_integers( params )
#
def try_params( n_iterations, params, get_predictions = False ):
    # Budget -> number of boosting rounds; train and evaluate an XGBoost
    # regressor. NOTE: get_predictions is accepted for interface parity with
    # other defs but is not used in this body.
    n_estimators = int( round( n_iterations * trees_per_iteration ))
    print "n_estimators:", n_estimators
    pprint( params )
    model = XGB( n_estimators = n_estimators, nthread = -1, **params )
    return train_and_eval_sklearn_regressor( model, data )
| 1,716 | 20.197531 | 67 | py |
hyperband | hyperband-master/defs_regression/keras_mlp.py | "function (and parameter space) definitions for hyperband"
"regression with Keras (multilayer perceptron)"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data_for_regression import data
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.layers.normalization import BatchNormalization as BatchNorm
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import *
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler, MaxAbsScaler
#
# TODO: advanced activations - 'leakyrelu', 'prelu', 'elu', 'thresholdedrelu', 'srelu'
max_layers = 5
max_layer_size = 100

# Search space for the regression MLP; unlike the classification variant it
# also samples 'shuffle' and the training 'loss'.
space = {
    'scaler': hp.choice( 's',
        ( None, 'StandardScaler', 'RobustScaler', 'MinMaxScaler', 'MaxAbsScaler' )),
    'n_layers': hp.quniform( 'ls', 1, max_layers, 1 ),
    #'layer_size': hp.quniform( 'ls', 5, 100, 1 ),
    #'activation': hp.choice( 'a', ( 'relu', 'sigmoid', 'tanh' )),
    'init': hp.choice( 'i', ( 'uniform', 'normal', 'glorot_uniform',
        'glorot_normal', 'he_uniform', 'he_normal' )),
    'batch_size': hp.choice( 'bs', ( 16, 32, 64, 128, 256 )),
    'shuffle': hp.choice( 'sh', ( False, True )),
    'loss': hp.choice( 'l', ( 'mean_absolute_error', 'mean_squared_error' )),
    'optimizer': hp.choice( 'o', ( 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax' ))
}

# for each hidden layer, we choose size, activation and extras individually
# (all max_layers layers are parameterised; only the first n_layers are used)
for i in range( 1, max_layers + 1 ):
    space[ 'layer_{}_size'.format( i )] = hp.quniform( 'ls{}'.format( i ),
        2, max_layer_size, 1 )
    space[ 'layer_{}_activation'.format( i )] = hp.choice( 'a{}'.format( i ),
        ( 'relu', 'sigmoid', 'tanh' ))
    space[ 'layer_{}_extras'.format( i )] = hp.choice( 'e{}'.format( i ), (
        { 'name': 'dropout', 'rate': hp.uniform( 'd{}'.format( i ), 0.1, 0.5 )},
        { 'name': 'batchnorm' },
        { 'name': None } ))
def get_params():
    """Draw one random configuration from `space`, coercing integer-valued
    floats (e.g. quniform layer sizes) back to ints."""
    return handle_integers( sample( space ) )
#
# print hidden layers config in readable way
def print_layers( params ):
    # One line per configured hidden layer; the trailing commas are the
    # Python 2 idiom for suppressing the newline until the final bare print.
    for i in range( 1, params['n_layers'] + 1 ):
        print "layer {} | size: {:>3} | activation: {:<7} | extras: {}".format( i,
            params['layer_{}_size'.format( i )],
            params['layer_{}_activation'.format( i )],
            params['layer_{}_extras'.format( i )]['name'] ),
        if params['layer_{}_extras'.format( i )]['name'] == 'dropout':
            print "- rate: {:.1%}".format( params['layer_{}_extras'.format( i )]['rate'] ),
        print
def print_params( params ):
    # Pretty-print the non-layer hyperparameters, then the per-layer config.
    pprint({ k: v for k, v in params.items() if not k.startswith( 'layer_' )})
    print_layers( params )
    print
def try_params( n_iterations, params ):
    # Build, train and evaluate a regression MLP for the given budget
    # (n_iterations -> epochs). Returns hyperband's result dict.
    print "iterations:", n_iterations
    print_params( params )
    y_train = data['y_train']
    y_test = data['y_test']
    # Optional feature scaling, chosen as a hyperparameter (class name str).
    if params['scaler']:
        scaler = eval( "{}()".format( params['scaler'] ))
        x_train_ = scaler.fit_transform( data['x_train'].astype( float ))
        x_test_ = scaler.transform( data['x_test'].astype( float ))
    else:
        x_train_ = data['x_train']
        x_test_ = data['x_test']
    input_dim = x_train_.shape[1]
    model = Sequential()
    model.add( Dense( params['layer_1_size'], init = params['init'],
        activation = params['layer_1_activation'], input_dim = input_dim ))
    # Remaining hidden layers, each optionally preceded by dropout/batchnorm.
    for i in range( int( params['n_layers'] ) - 1 ):
        extras = 'layer_{}_extras'.format( i + 1 )
        if params[extras]['name'] == 'dropout':
            model.add( Dropout( params[extras]['rate'] ))
        elif params[extras]['name'] == 'batchnorm':
            model.add( BatchNorm())
        model.add( Dense( params['layer_{}_size'.format( i + 2 )], init = params['init'],
            activation = params['layer_{}_activation'.format( i + 2 )]))
    # Linear output unit + sampled regression loss (MAE or MSE).
    model.add( Dense( 1, init = params['init'], activation = 'linear' ))
    model.compile( optimizer = params['optimizer'], loss = params['loss'] )
    #print model.summary()
    #
    # Early stopping on validation loss caps the effective epoch count.
    validation_data = ( x_test_, y_test )
    early_stopping = EarlyStopping( monitor = 'val_loss', patience = 5, verbose = 0 )
    history = model.fit( x_train_, y_train,
        nb_epoch = int( round( n_iterations )),
        batch_size = params['batch_size'],
        shuffle = params['shuffle'],
        validation_data = validation_data,
        callbacks = [ early_stopping ])
    #
    p = model.predict( x_train_, batch_size = params['batch_size'] )
    mse = MSE( y_train, p )
    rmse = sqrt( mse )
    mae = MAE( y_train, p )
    print "\n# training | RMSE: {:.4f}, MAE: {:.4f}".format( rmse, mae )
    #
    p = model.predict( x_test_, batch_size = params['batch_size'] )
    mse = MSE( y_test, p )
    rmse = sqrt( mse )
    mae = MAE( y_test, p )
    print "# testing | RMSE: {:.4f}, MAE: {:.4f}".format( rmse, mae )
    # 'loss' (test RMSE) is what hyperband minimises; early_stop lets it
    # skip configurations that already converged.
    return { 'loss': rmse, 'rmse': rmse, 'mae': mae, 'early_stop': model.stop_training }
| 4,683 | 29.815789 | 90 | py |
hyperband | hyperband-master/defs_regression/meta.py | # meta regressor
from common_defs import *
regressors = ( 'gb', 'rf', 'xt', 'sgd', 'polylearn_fm', 'polylearn_pn', 'keras_mlp' )

# import all the functions: alias each regressor module's get_params /
# try_params as get_params_<name>/try_params_<name> for string dispatch.
for r in regressors:
    exec( "from defs_regression.{} import get_params as get_params_{}".format( r, r ))
    exec( "from defs_regression.{} import try_params as try_params_{}".format( r, r ))

# Top-level search space: just the choice of regressor.
space = { 'regressor': hp.choice( 'r', regressors ) }
def get_params():
    """Pick a regressor at random, then merge in that regressor's own
    randomly sampled hyperparameters (via the aliased get_params_<name>)."""
    params = sample( space )
    chosen = params['regressor']
    params.update( eval( "get_params_{}()".format( chosen )))
    return params
def try_params( n_iterations, params ):
    # Dispatch to try_params_<regressor>; copy params so popping 'regressor'
    # does not mutate the caller's dict.
    params_ = dict( params )
    r = params_.pop( 'regressor' )
    print r
    return eval( "try_params_{}( n_iterations, params_ )".format( r ))
| 749 | 25.785714 | 85 | py |
SfSNet-PyTorch | SfSNet-PyTorch-master/main_gen_pseudo-data.py | #
# Experiment Entry point
# 1. Trains model on Syn Data
# 2. Generates CelebA Data
# 3. Trains on Syn + CelebA Data
#
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch.nn as nn
import argparse
import wandb
from data_loading import *
from utils import *
from shading import *
from train import *
from models import *
def main():
    """Stage 1 of the SfSNet pipeline.

    Trains SkipNet on the synthetic dataset, then runs it over CelebA to
    synthesize pseudo ground truth (normal, albedo, shading, SH, recon) and
    indexes the generated files into train/test CSVs for mix training.
    """
    ON_SERVER = True

    parser = argparse.ArgumentParser(description='SfSNet - Residual')
    parser.add_argument('--batch_size', type=int, default=8, metavar='N',
                        help='input batch size for training (default: 8)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--wt_decay', type=float, default=0.0005, metavar='W',
                        help='SGD momentum (default: 0.0005)')
    parser.add_argument('--no_cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--read_first', type=int, default=-1,
                        help='read first n rows (default: -1)')
    parser.add_argument('--details', type=str, default=None,
                        help='Explanation of the run')
    if ON_SERVER:
        parser.add_argument('--syn_data', type=str, default='/nfs/bigdisk/bsonawane/sfsnet_data/',
                            help='Synthetic Dataset path')
        parser.add_argument('--celeba_data', type=str, default='/nfs/bigdisk/bsonawane/CelebA-dataset/CelebA_crop_resize_128/',
                            help='CelebA Dataset path')
        parser.add_argument('--log_dir', type=str, default='./results/',
                            help='Log Path')
    else:
        parser.add_argument('--syn_data', type=str, default='./data/sfs-net/',
                            help='Synthetic Dataset path')
        parser.add_argument('--celeba_data', type=str, default='./data/celeba/',
                            help='CelebA Dataset path')
        parser.add_argument('--log_dir', type=str, default='./results/',
                            help='Log Path')
    parser.add_argument('--load_model', type=str, default=None,
                        help='load model from')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)

    # initialization
    syn_data = args.syn_data
    celeba_data = args.celeba_data
    batch_size = args.batch_size
    lr = args.lr
    wt_decay = args.wt_decay
    log_dir = args.log_dir
    epochs = args.epochs
    model_dir = args.load_model
    read_first = args.read_first
    # read_first == -1 means "read everything"
    if read_first == -1:
        read_first = None

    # Init WandB for logging
    wandb.init(project='SfSNet-CelebA-Baseline-V3-SkipNetBased')
    wandb.log({'lr': lr, 'weight decay': wt_decay})

    # Initialize model: resume from a checkpoint if given, else random init
    skipnet_model = SkipNet()
    if use_cuda:
        skipnet_model = skipnet_model.cuda()

    if model_dir is not None:
        skipnet_model.load_state_dict(torch.load(model_dir + 'skipnet_model.pkl'))
    else:
        print('Initializing weights')
        skipnet_model.apply(weights_init)

    # Portable directory creation (was: os.system('mkdir -p ...'))
    os.makedirs(args.log_dir, exist_ok=True)
    with open(args.log_dir + '/details.txt', 'w') as f:
        # --details defaults to None; writing None would raise TypeError
        f.write(args.details or '')

    wandb.watch(skipnet_model)

    # 1. Train on synthetic data
    train_synthetic(skipnet_model, syn_data, celeba_data=celeba_data, read_first=read_first,
                    batch_size=batch_size, num_epochs=epochs, log_path=log_dir + 'Synthetic_Train/',
                    use_cuda=use_cuda, wandb=wandb, lr=lr, wt_decay=wt_decay, training_syn=True)

    # 2. Generate pseudo-training information for the CelebA dataset
    celeba_train_csv = celeba_data + '/train.csv'
    celeba_test_csv = celeba_data + '/test.csv'

    train_dataset, _ = get_celeba_dataset(read_from_csv=celeba_train_csv, read_first=read_first, validation_split=0)
    test_dataset, _ = get_celeba_dataset(read_from_csv=celeba_test_csv, read_first=read_first, validation_split=0)
    # batch_size 1 so each prediction is written as its own image file
    celeba_train_dl = DataLoader(train_dataset, batch_size=1, shuffle=True)
    celeba_test_dl = DataLoader(test_dataset, batch_size=1, shuffle=True)

    out_celeba_images_dir = celeba_data + 'synthesized_data_skip_net/'
    out_train_celeba_images_dir = out_celeba_images_dir + 'train/'
    out_test_celeba_images_dir = out_celeba_images_dir + 'test/'
    os.makedirs(out_train_celeba_images_dir, exist_ok=True)
    os.makedirs(out_test_celeba_images_dir, exist_ok=True)

    # Dump normal, albedo, shading, face and SH for both CelebA splits
    generate_celeba_synthesize(skipnet_model, celeba_train_dl, train_epoch_num=epochs, use_cuda=use_cuda,
                               out_folder=out_train_celeba_images_dir, wandb=wandb)
    generate_celeba_synthesize(skipnet_model, celeba_test_dl, train_epoch_num=epochs, use_cuda=use_cuda,
                               out_folder=out_test_celeba_images_dir, wandb=wandb)

    # Index the generated images into CSVs for the mix-training stage
    generate_celeba_synthesize_data_csv(out_train_celeba_images_dir, out_celeba_images_dir + '/train.csv')
    generate_celeba_synthesize_data_csv(out_test_celeba_images_dir, out_celeba_images_dir + '/test.csv')

if __name__ == '__main__':
    main()
| 6,117 | 41.783217 | 164 | py |
SfSNet-PyTorch | SfSNet-PyTorch-master/data_loading.py | import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms
import glob
import cv2
from random import randint
import os
from skimage import io
from PIL import Image
import pandas as pd
from utils import save_image, denorm, get_normal_in_range
import numpy as np
IMAGE_SIZE = 128
def generate_sfsnet_data_csv(dir, save_location):
    """Scan a SfSNet synthetic-data directory and write an index CSV.

    Expects files laid out as '<folder>/<id>_<kind>_<a>_<b>.<ext>' where
    <kind> is one of albedo/normal/depth/mask/face/light ('.txt' for light,
    '.png' otherwise).  Only samples for which ALL six kinds exist are
    written to the CSV at `save_location`.
    """
    albedo = set()
    normal = set()
    depth = set()
    mask = set()
    face = set()
    sh = set()

    name_to_set = {'albedo' : albedo, 'normal' : normal, 'depth' : depth, \
                    'mask' : mask, 'face' : face, 'light' : sh}

    # Collect a canonical key '<folder>_<id>_<a>_<b>' for every file of each kind.
    for k, v in name_to_set.items():
        regex_str = '*/*_' + k + '_*'
        for img in sorted(glob.glob(dir + regex_str)):
            timg = img.split('/')
            folder_id = timg[-2]
            name = timg[-1].split('.')[0]
            name = name.split('_')
            assert(len(name) == 4)
            name = folder_id + '_' + name[0] + '_' + name[2] + '_' + name[3]
            v.add(name)

    # Keep only samples present for every kind.
    final_images = set.intersection(albedo, normal, depth, mask, face, sh)

    albedo = []
    normal = []
    depth = []
    mask = []
    face = []
    sh = []
    name = []
    name_to_list = {'albedo' : albedo, 'normal' : normal, 'depth' : depth, \
                    'mask' : mask, 'face' : face, 'light' : sh, 'name' : name}

    for img in final_images:
        split = img.split('_')
        for k, v in name_to_list.items():
            ext = '.png'
            if k == 'light':
                ext = '.txt'
            if k == 'name':
                # BUG FIX: this branch previously assigned to 'filename'
                # (note the missing underscore) while 'file_name' was
                # appended below, so the name column received the previous
                # kind's path instead of the sample name.
                file_name = split[0] + '_' + split[1] + '_' + k + '_' + '_'.join(split[2:])
            else:
                file_name = split[0] + '/' + split[1] + '_' + k + '_' + '_'.join(split[2:]) + ext
            v.append(file_name)

    df = pd.DataFrame(data=name_to_list)
    df.to_csv(save_location)
def generate_celeba_synthesize_data_csv(dir, save_location):
    """Index synthesized CelebA outputs (as written by
    generate_celeba_synthesize) into a CSV at `save_location`.

    Depth and mask have no generated files, so those columns hold the
    literal string 'None'; the name column is the face file's stem.
    """
    albedo = sorted(glob.glob(dir + '*_albedo*'))
    normal = sorted(glob.glob(dir + '*_normal*'))
    sh = sorted(glob.glob(dir + '*_light*'))

    face = []
    depth = []
    mask = []
    name = []
    for img in sorted(glob.glob(dir + '*_face*')):
        face.append(img)
        mask.append('None')
        depth.append('None')
        name.append(img.split('/')[-1].split('.')[0])

    columns = {'albedo' : albedo, 'normal' : normal, 'depth' : depth, \
               'mask' : mask, 'face' : face, 'light' : sh, 'name' : name}
    pd.DataFrame(data=columns).to_csv(save_location)
    print('saved')
def generate_celeba_data_csv(dir, save_location):
    """Write a CSV listing every '<dir>*/all/*.jpg' face image and its stem name."""
    records = {'face': [], 'name': []}
    for img in sorted(glob.glob(dir + '*/all/*.jpg')):
        records['face'].append(img)
        # stem of the file name, e.g. '.../all/000123.jpg' -> '000123'
        records['name'].append(img.split('/')[-1].split('.')[0])
    pd.DataFrame(data=records).to_csv(save_location)
def get_sfsnet_dataset(syn_dir=None, read_from_csv=None, read_celeba_csv=None, read_first=None, validation_split=0, training_syn=False):
    """Build (train, validation) SfSNetDataset splits.

    Sources:
      - synthetic samples, either globbed from `syn_dir` or listed in
        `read_from_csv` (CSV paths are relative and get `syn_dir` prefixed);
      - optionally, pseudo-supervised CelebA samples from `read_celeba_csv`
        (paths used as-is), skipped when `training_syn` is True.

    `read_first` truncates each CSV; `validation_split` is a percentage.
    """
    albedo = []
    sh = []
    mask = []
    normal = []
    face = []
    depth = []

    # When training purely on synthetic data, ignore any CelebA CSV.
    if training_syn:
        read_celeba_csv = None

    if read_from_csv is None:
        # Glob the directory layout '<folder>/<id>_<kind>_...'.
        for img in sorted(glob.glob(syn_dir + '*/*_albedo_*')):
            albedo.append(img)
        for img in sorted(glob.glob(syn_dir + '*/*_face_*')):
            face.append(img)
        for img in sorted(glob.glob(syn_dir + '*/*_normal_*')):
            normal.append(img)
        for img in sorted(glob.glob(syn_dir + '*/*_depth_*')):
            depth.append(img)
        for img in sorted(glob.glob(syn_dir + '*/*_mask_*')):
            mask.append(img)
        for img in sorted(glob.glob(syn_dir + '*/*_light_*.txt')):
            sh.append(img)
    else:
        df = pd.read_csv(read_from_csv)
        df = df[:read_first]
        albedo = list(df['albedo'])
        face   = list(df['face'])
        normal = list(df['normal'])
        depth  = list(df['depth'])
        mask   = list(df['mask'])
        sh     = list(df['light'])

        name_to_list = {'albedo' : albedo, 'normal' : normal, 'depth' : depth, \
                   'mask' : mask, 'face' : face, 'light' : sh}

        # CSV stores paths relative to syn_dir; prefix them in place.
        for _, v in name_to_list.items():
            v[:] = [syn_dir + el for el in v]
    
    # Merge Synthesized Celeba dataset for Psedo-Supervised training
    if read_celeba_csv is not None:
        df = pd.read_csv(read_celeba_csv)
        df = df[:read_first]
        albedo += list(df['albedo'])
        face   += list(df['face'])
        normal += list(df['normal'])
        depth  += list(df['depth'])
        mask   += list(df['mask'])
        sh     += list(df['light'])

    assert(len(albedo) == len(face) == len(normal) == len(depth) == len(mask) == len(sh))
    dataset_size = len(albedo)
    validation_count = int (validation_split * dataset_size / 100)
    train_count      = dataset_size - validation_count

    # Build custom datasets
    transform = transforms.Compose([
        transforms.Resize(IMAGE_SIZE),
        transforms.ToTensor()
    ])

    full_dataset = SfSNetDataset(albedo, face, normal, mask, sh, transform)
    # TODO: This will vary dataset run-to-run
    # Shall we just split manually to ensure run-to-run train-val dataset is same?
    train_dataset, val_dataset = random_split(full_dataset, [train_count, validation_count])
    return train_dataset, val_dataset
def get_celeba_dataset(dir=None, read_from_csv=None, read_first=None, validation_split=0):
    """Build (train, validation) CelebADataset splits of raw face images.

    Faces are globbed from `dir` or read from the 'face' column of
    `read_from_csv` (truncated to `read_first` rows); `validation_split`
    is a percentage of the dataset.
    """
    face = []
    if read_from_csv is None:
        for img in sorted(glob.glob(dir + '*/*_face_*')):
            face.append(img)
    else:
        df = pd.read_csv(read_from_csv)
        df = df[:read_first]
        face = list(df['face'])
    
    dataset_size = len(face)
    validation_count = int (validation_split * dataset_size / 100)
    train_count      = dataset_size - validation_count

    # Build custom datasets
    transform = transforms.Compose([
        transforms.Resize(IMAGE_SIZE),
        transforms.ToTensor()
    ])

    full_dataset = CelebADataset(face, transform)
    # TODO: This will vary dataset run-to-run
    # Shall we just split manually to ensure run-to-run train-val dataset is same?
    train_dataset, val_dataset = random_split(full_dataset, [train_count, validation_count])
    return train_dataset, val_dataset
def generate_celeba_synthesize(sfs_net_model, dl, train_epoch_num = 0,
                               use_cuda = False, out_folder = None, wandb = None):
    """Run a trained model over a CelebA dataloader and dump its predictions.

    For every batch, writes normal/albedo/shading/reconstruction/face images
    plus the predicted SH coefficients to `out_folder`, named
    '<train_epoch_num>_<batch-index>_<kind>'.  Returns the average L1
    reconstruction loss over the dataloader, which is also written to
    '<out_folder>/readme.txt'.
    """
    recon_loss = nn.L1Loss()
    if use_cuda:
        recon_loss = recon_loss.cuda()

    tloss = 0  # accumulated reconstruction loss

    for bix, data in enumerate(dl):
        face = data
        if use_cuda:
            face = face.cuda()

        # predicted_face == reconstruction
        predicted_normal, predicted_albedo, predicted_sh, predicted_shading, predicted_face = sfs_net_model(face)

        # save predictions in the output folder
        file_name = out_folder + str(train_epoch_num) + '_' + str(bix)

        # normals are in [-1, 1]; map to [0, 1] before writing as an image
        predicted_normal = denorm(predicted_normal)
        save_image(predicted_normal, path = file_name+'_normal.png')
        save_image(predicted_albedo, path = file_name+'_albedo.png')
        save_image(predicted_shading, path = file_name+'_shading.png')
        save_image(predicted_face, path = file_name+'_recon.png')
        save_image(face, path = file_name+'_face.png')
        np.savetxt(file_name+'_light.txt', predicted_sh.cpu().detach().numpy(), delimiter='\t')

        # Reconstruction loss, tracked for the summary below
        total_loss = recon_loss(predicted_face, face)
        tloss += total_loss.item()

    len_dl = len(dl)
    # Context manager so the summary file is closed even on error.
    with open(out_folder + 'readme.txt', 'w') as f:
        f.write('Average Reconstruction Loss: ' + str(tloss / len_dl))

    # return average loss over dataset
    return tloss / len_dl
class SfSNetDataset(Dataset):
    """Dataset yielding (albedo, normal, mask, sh, face) tuples.

    All inputs are parallel lists of file paths (SH files are tab-separated
    text); `transform` is applied to albedo and face images.  A mask path of
    'None' (CelebA pseudo-data) yields an all-ones mask.
    """
    def __init__(self, albedo, face, normal, mask, sh, transform = None):
        self.albedo = albedo
        self.face   = face
        self.normal = normal
        self.mask   = mask
        self.sh     = sh
        self.transform = transform
        self.dataset_len = len(self.albedo)
        self.mask_transform = transforms.Compose([
                              transforms.Resize(IMAGE_SIZE),
                              transforms.ToTensor(),
                            ])
        # Normals are resized only; tensor conversion/scaling is done manually
        # in __getitem__ so the [-1, 1] encoding can be applied.
        self.normal_transform = transforms.Compose([
                              transforms.Resize(IMAGE_SIZE)
                            ])

    def __getitem__(self, index):
        albedo = self.transform(Image.open(self.albedo[index]))
        face   = self.transform(Image.open(self.face[index]))
        # normal = io.imread(self.face[index]))
        normal = self.normal_transform(Image.open(self.normal[index]))
        # HWC uint8 -> CHW float, then decode the x128+128 encoding to [-1, 1]
        normal = torch.tensor(np.asarray(normal)).permute([2, 0, 1])
        normal = normal.type(torch.float)
        normal = (normal - 128) / 128
        if self.mask[index] == 'None':
            # Load dummy 1 mask for CelebA
            # To ensure consistency if mask is used
            mask = torch.ones(3, IMAGE_SIZE, IMAGE_SIZE)
        else:
            mask   = self.mask_transform(Image.open(self.mask[index]))
        # 27 SH coefficients (9 per RGB channel) as a flat float tensor
        pd_sh  = pd.read_csv(self.sh[index], sep='\t', header = None)
        sh     = torch.tensor(pd_sh.values).type(torch.float).reshape(-1)
        return albedo, normal, mask, sh, face

    def __len__(self):
        return self.dataset_len
class CelebADataset(Dataset):
    """Dataset of raw CelebA face images; yields one transformed image per item."""

    def __init__(self, face, transform = None):
        self.face = face
        self.transform = transform
        self.dataset_len = len(self.face)
        # Kept for interface parity with SfSNetDataset (not used by this class).
        self.mask_transform = transforms.Compose([
                              transforms.Resize(IMAGE_SIZE),
                              transforms.ToTensor()
                            ])

    def __getitem__(self, index):
        return self.transform(Image.open(self.face[index]))

    def __len__(self):
        return self.dataset_len
| 10,618 | 32.498423 | 136 | py |
SfSNet-PyTorch | SfSNet-PyTorch-master/utils.py | import matplotlib.pyplot as plt
import torchvision
from PIL import Image
from torch.nn import *
def applyMask(input_img, mask):
    """Element-wise mask an image; a None mask is a no-op."""
    return input_img if mask is None else input_img * mask
def denorm(x):
    """Map a tensor from [-1, 1] to [0, 1], clamping out-of-range values."""
    return ((x + 1) / 2).clamp(0, 1)
def get_normal_in_range(normal):
    """Convert a normal map from [-1, 1] (x128+128 byte encoding) to [0, 1]."""
    rescaled = normal * 128 + 128
    return rescaled.clamp(0, 255) / 255
def get_image_grid(pic, denormalize=False, mask=None):
    """Build an H x W x C uint8 numpy grid (8 images per row) from a batch tensor.

    Optionally denormalizes from [-1, 1] and applies an element-wise mask first.
    """
    if denormalize:
        pic = denorm(pic)
    if mask is not None:
        pic = pic * mask

    grid = torchvision.utils.make_grid(pic, nrow=8, padding=2)
    # scale to 0..255, move channels last, and hand back as numpy
    return grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
def save_image(pic, denormalize=False, path=None, mask=None):
    """Render a batch as an image grid: show interactively when `path` is
    None, otherwise write it to `path`."""
    ndarr = get_image_grid(pic, denormalize=denormalize, mask=mask)
    if path is None:  # fixed: identity comparison with None instead of '=='
        plt.imshow(ndarr)
        plt.show()
    else:
        Image.fromarray(ndarr).save(path)
def wandb_log_images(wandb, img, mask, caption, step, log_name, path=None, denormalize=False):
    """Log an image grid to Weights & Biases, optionally saving a copy to disk."""
    ndarr = get_image_grid(img, denormalize=denormalize, mask=mask)
    # persist a copy alongside the wandb upload if a path is provided
    if path is not None:
        Image.fromarray(ndarr).save(path)
    wandb.log({log_name: wandb.Image(ndarr, caption=caption)})
def weights_init(m):
    """Xavier-init conv weights, normal-init linear weights, zero all biases.

    Intended to be applied recursively via model.apply(weights_init).
    """
    if isinstance(m, (Conv2d, Conv1d)):
        init.xavier_uniform_(m.weight)
        if m.bias is not None:
            init.constant_(m.bias, 0)
    elif isinstance(m, Linear):
        init.normal_(m.weight)
        if m.bias is not None:
            init.constant_(m.bias, 0)
| 1,681 | 26.57377 | 94 | py |
SfSNet-PyTorch | SfSNet-PyTorch-master/main_mix_training.py | #
# Experiment Entry point
# 1. Trains model on Syn Data
# 2. Generates CelebA Data
# 3. Trains on Syn + CelebA Data
#
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch.nn as nn
import argparse
import wandb
from data_loading import *
from utils import *
from shading import *
from train import *
from models import *
from sfs_net_model import SfSNet as sfsnet_pretrained_model
def main():
    """Mix-training stage: fine-tune the SfSNet pipeline on synthetic data
    plus previously synthesized CelebA pseudo-supervision, starting from the
    author-provided pretrained weights (only the albedo generator remains
    trainable; see SfsNetPipeline.fix_weights)."""
    ON_SERVER = False

    parser = argparse.ArgumentParser(description='SfSNet - Residual')
    parser.add_argument('--batch_size', type=int, default=8, metavar='N',
                        help='input batch size for training (default: 8)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--wt_decay', type=float, default=0.0005, metavar='W',
                        help='SGD momentum (default: 0.0005)')
    parser.add_argument('--no_cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--read_first', type=int, default=-1,
                        help='read first n rows (default: -1)')
    parser.add_argument('--details', type=str, default=None,
                        help='Explanation of the run')
    parser.add_argument('--load_pretrained_model', type=str, default='./pretrained/net_epoch_r5_5.pth',
                        help='Pretrained model path')
    if ON_SERVER:
        parser.add_argument('--syn_data', type=str, default='/nfs/bigdisk/bsonawane/sfsnet_data/',
                            help='Synthetic Dataset path')
        parser.add_argument('--celeba_data', type=str, default='/nfs/bigdisk/bsonawane/CelebA-dataset/celeba_sfsnet_gen_20k/',
                            help='CelebA Dataset path')
        parser.add_argument('--log_dir', type=str, default='./results/',
                            help='Log Path')
    else:
        parser.add_argument('--syn_data', type=str, default='./data/sfs-net/',
                            help='Synthetic Dataset path')
        parser.add_argument('--celeba_data', type=str, default='./data/celeba_20k/',
                            help='CelebA Dataset path')
        parser.add_argument('--log_dir', type=str, default='./results/',
                            help='Log Path')
    parser.add_argument('--load_model', type=str, default=None,
                        help='load model from')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)

    # initialization
    syn_data = args.syn_data
    celeba_data = args.celeba_data
    batch_size = args.batch_size
    lr = args.lr
    wt_decay = args.wt_decay
    log_dir = args.log_dir
    epochs = args.epochs
    model_dir = args.load_model
    read_first = args.read_first
    pretrained_model_dict = args.load_pretrained_model
    # read_first == -1 means "read everything"
    if read_first == -1:
        read_first = None

    # Init WandB for logging
    wandb.init(project='SfSNet-CelebA-Baseline-V2-PreTrained')
    wandb.log({'lr': lr, 'weight decay': wt_decay})

    # Initialize model: resume from a checkpoint if given, else random init
    sfs_net_model = SfsNetPipeline()
    if use_cuda:
        sfs_net_model = sfs_net_model.cuda()

    if model_dir is not None:
        sfs_net_model.load_state_dict(torch.load(model_dir + 'sfs_net_model.pkl'))
    else:
        sfs_net_model.apply(weights_init)

    # Overlay the author-provided pretrained weights, then freeze everything
    # except the albedo generator.
    sfs_net_pretrained_dict = torch.load(pretrained_model_dict)
    sfs_net_state_dict = sfs_net_model.state_dict()
    load_model_from_pretrained(sfs_net_pretrained_dict, sfs_net_state_dict)
    sfs_net_model.load_state_dict(sfs_net_state_dict)
    sfs_net_model.fix_weights()

    # Portable directory creation (was: os.system('mkdir -p ...'))
    os.makedirs(args.log_dir, exist_ok=True)
    with open(args.log_dir + '/details.txt', 'w') as f:
        # --details defaults to None; writing None would raise TypeError
        f.write(args.details or '')

    wandb.watch(sfs_net_model)

    # 1. Train on both Synthetic and Real (CelebA) dataset
    train(sfs_net_model, syn_data, celeba_data=celeba_data, read_first=read_first,
          batch_size=batch_size, num_epochs=epochs, log_path=log_dir + 'Mix_Training/',
          use_cuda=use_cuda, wandb=wandb, lr=lr, wt_decay=wt_decay)
if __name__ == '__main__':
main() | 4,960 | 40 | 164 | py |
SfSNet-PyTorch | SfSNet-PyTorch-master/main_gen_synthetic_and_full.py | #
# Experiment Entry point
# 1. Trains model on Syn Data
# 2. Generates CelebA Data
# 3. Trains on Syn + CelebA Data
#
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch.nn as nn
import argparse
import wandb
from data_loading import *
from utils import *
from shading import *
from train import *
from models import *
def main():
    """Full experiment entry point.

    1. Trains SkipNet on synthetic data.
    2. Synthesizes CelebA pseudo ground truth with the trained SkipNet and
       indexes it into CSVs.
    3. Trains a fresh SfsNetPipeline on synthetic + pseudo-CelebA data.
    """
    ON_SERVER = True

    parser = argparse.ArgumentParser(description='SfSNet - Residual')
    parser.add_argument('--batch_size', type=int, default=8, metavar='N',
                        help='input batch size for training (default: 8)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--wt_decay', type=float, default=0.0005, metavar='W',
                        help='SGD momentum (default: 0.0005)')
    parser.add_argument('--no_cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--read_first', type=int, default=-1,
                        help='read first n rows (default: -1)')
    parser.add_argument('--details', type=str, default=None,
                        help='Explanation of the run')
    if ON_SERVER:
        parser.add_argument('--syn_data', type=str, default='/nfs/bigdisk/bsonawane/sfsnet_data/',
                            help='Synthetic Dataset path')
        parser.add_argument('--celeba_data', type=str, default='/nfs/bigdisk/bsonawane/CelebA-dataset/CelebA_crop_resize_128/',
                            help='CelebA Dataset path')
        parser.add_argument('--log_dir', type=str, default='./results/',
                            help='Log Path')
    else:
        parser.add_argument('--syn_data', type=str, default='./data/sfs-net/',
                            help='Synthetic Dataset path')
        parser.add_argument('--celeba_data', type=str, default='./data/celeba/',
                            help='CelebA Dataset path')
        parser.add_argument('--log_dir', type=str, default='./results/',
                            help='Log Path')
    parser.add_argument('--load_model', type=str, default=None,
                        help='load model from')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)

    # initialization
    syn_data = args.syn_data
    celeba_data = args.celeba_data
    batch_size = args.batch_size
    lr = args.lr
    wt_decay = args.wt_decay
    log_dir = args.log_dir
    epochs = args.epochs
    model_dir = args.load_model
    read_first = args.read_first
    # read_first == -1 means "read everything"
    if read_first == -1:
        read_first = None

    # Init WandB for logging
    wandb.init(project='SfSNet-CelebA-Baseline-V3-SkipNetBased')
    wandb.log({'lr': lr, 'weight decay': wt_decay})

    # Initialize SkipNet: resume from a checkpoint if given, else random init
    skipnet_model = SkipNet()
    if use_cuda:
        skipnet_model = skipnet_model.cuda()

    if model_dir is not None:
        skipnet_model.load_state_dict(torch.load(model_dir + 'skipnet_model.pkl'))
    else:
        print('Initializing weights')
        skipnet_model.apply(weights_init)

    # Portable directory creation (was: os.system('mkdir -p ...'))
    os.makedirs(args.log_dir, exist_ok=True)
    with open(args.log_dir + '/details.txt', 'w') as f:
        # --details defaults to None; writing None would raise TypeError
        f.write(args.details or '')

    wandb.watch(skipnet_model)

    # 1. Train on Synthetic data
    train_synthetic(skipnet_model, syn_data, celeba_data=celeba_data, read_first=read_first,
                    batch_size=batch_size, num_epochs=epochs, log_path=log_dir + 'Synthetic_Train/',
                    use_cuda=use_cuda, wandb=wandb, lr=lr, wt_decay=wt_decay, training_syn=True)

    # 2. Generate pseudo-training information for the CelebA dataset
    celeba_train_csv = celeba_data + '/train.csv'
    celeba_test_csv = celeba_data + '/test.csv'

    train_dataset, _ = get_celeba_dataset(read_from_csv=celeba_train_csv, read_first=read_first, validation_split=0)
    test_dataset, _ = get_celeba_dataset(read_from_csv=celeba_test_csv, read_first=read_first, validation_split=0)
    celeba_train_dl = DataLoader(train_dataset, batch_size=1, shuffle=True)
    celeba_test_dl = DataLoader(test_dataset, batch_size=1, shuffle=True)

    out_celeba_images_dir = celeba_data + 'synthesized_data_skip_net/'
    out_train_celeba_images_dir = out_celeba_images_dir + 'train/'
    out_test_celeba_images_dir = out_celeba_images_dir + 'test/'
    os.makedirs(out_train_celeba_images_dir, exist_ok=True)
    os.makedirs(out_test_celeba_images_dir, exist_ok=True)

    # Dump normal, albedo, shading, face and SH for both CelebA splits
    generate_celeba_synthesize(skipnet_model, celeba_train_dl, train_epoch_num=epochs, use_cuda=use_cuda,
                               out_folder=out_train_celeba_images_dir, wandb=wandb)
    generate_celeba_synthesize(skipnet_model, celeba_test_dl, train_epoch_num=epochs, use_cuda=use_cuda,
                               out_folder=out_test_celeba_images_dir, wandb=wandb)

    # Index the generated images into CSVs for the mix-training stage
    generate_celeba_synthesize_data_csv(out_train_celeba_images_dir, out_celeba_images_dir + '/train.csv')
    generate_celeba_synthesize_data_csv(out_test_celeba_images_dir, out_celeba_images_dir + '/test.csv')

    # 3. Train on both Synthetic and Real (CelebA) dataset
    sfsnet_model = SfsNetPipeline()
    if use_cuda:
        sfsnet_model = sfsnet_model.cuda()

    print('Initializing weights')
    # BUG FIX: this previously called skipnet_model.apply(weights_init),
    # leaving the freshly constructed sfsnet_model uninitialized.
    sfsnet_model.apply(weights_init)

    train(sfsnet_model, syn_data, celeba_data=out_celeba_images_dir, read_first=read_first,
          batch_size=batch_size, num_epochs=epochs, log_path=log_dir + 'Mix_Training/',
          use_cuda=use_cuda, wandb=wandb, lr=lr, wt_decay=wt_decay)

if __name__ == '__main__':
    main()
| 6,749 | 41.993631 | 164 | py |
SfSNet-PyTorch | SfSNet-PyTorch-master/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import denorm
def get_shading(N, L):
    """Compute per-pixel shading from normals and spherical-harmonics lighting.

    N: b x 3 x h x w normal map (components in [-1, 1] encoding).
    L: b x 27 SH lighting coefficients, 9 per color channel.
    Returns a b x 3 x h x w shading tensor.
    """
    # Second-order SH basis constants (per the SfSNet formulation; see the
    # identical values in sfsNetShading below).
    c1 = 0.8862269254527579
    c2 = 1.0233267079464883
    c3 = 0.24770795610037571
    c4 = 0.8580855308097834
    c5 = 0.4290427654048917

    nx = N[:, 0, :, :]
    ny = N[:, 1, :, :]
    nz = N[:, 2, :, :]
    
    b, c, h, w = N.shape
    
    # Y1..Y9: the nine SH basis images evaluated at the per-pixel normals.
    Y1 = c1 * torch.ones(b, h, w)
    Y2 = c2 * nz
    Y3 = c2 * nx
    Y4 = c2 * ny
    Y5 = c3 * (2 * nz * nz - nx * nx - ny * ny)
    Y6 = c4 * nx * nz
    Y7 = c4 * ny * nz
    Y8 = c5 * (nx * nx - ny * ny)
    Y9 = c4 * nx * ny
    
    L = L.type(torch.float)
    # split the 27 coefficients into 9 per color channel
    sh = torch.split(L, 9, dim=1)

    assert(c == len(sh))
    shading = torch.zeros(b, c, h, w)
    
    if torch.cuda.is_available():
        Y1 = Y1.cuda()
        shading = shading.cuda()

    for j in range(c):
        l = sh[j]
        # Scale to 'h x w' dim
        l = l.repeat(1, h*w).view(b, h, w, 9)
        # Convert l into 'batch size', 'Index SH', 'h', 'w'
        l = l.permute([0, 3, 1, 2])
        # Generate shading
        shading[:, j, :, :] = Y1 * l[:, 0] + Y2 * l[:, 1] + Y3 * l[:, 2] + \
                            Y4 * l[:, 3] + Y5 * l[:, 4] + Y6 * l[:, 5] + \
                            Y7 * l[:, 6] + Y8 * l[:, 7] + Y9 * l[:, 8]
    
    return shading
class sfsNetShading(nn.Module):
    """nn.Module wrapper around the SH shading computation (same math as
    the module-level get_shading function)."""
    def __init__(self):
        super(sfsNetShading, self).__init__()
    
    def forward(self, N, L):
        """Compute b x 3 x h x w shading from normals N (b x 3 x h x w)
        and SH lighting L (b x 27, 9 coefficients per color channel)."""
        # Following values are computed from equation
        # from SFSNet
        c1 = 0.8862269254527579
        c2 = 1.0233267079464883
        c3 = 0.24770795610037571
        c4 = 0.8580855308097834
        c5 = 0.4290427654048917

        nx = N[:, 0, :, :]
        ny = N[:, 1, :, :]
        nz = N[:, 2, :, :]
        
        b, c, h, w = N.shape
        
        # Y1..Y9: the nine SH basis images evaluated at the normals.
        Y1 = c1 * torch.ones(b, h, w)
        Y2 = c2 * nz
        Y3 = c2 * nx
        Y4 = c2 * ny
        Y5 = c3 * (2 * nz * nz - nx * nx - ny * ny)
        Y6 = c4 * nx * nz
        Y7 = c4 * ny * nz
        Y8 = c5 * (nx * nx - ny * ny)
        Y9 = c4 * nx * ny
        
        L = L.type(torch.float)
        # split the 27 coefficients into 9 per color channel
        sh = torch.split(L, 9, dim=1)

        assert(c == len(sh))
        shading = torch.zeros(b, c, h, w)
        
        if torch.cuda.is_available():
            Y1 = Y1.cuda()
            shading = shading.cuda()

        for j in range(c):
            l = sh[j]
            # Scale to 'h x w' dim
            l = l.repeat(1, h*w).view(b, h, w, 9)
            # Convert l into 'batch size', 'Index SH', 'h', 'w'
            l = l.permute([0, 3, 1, 2])
            # Generate shading
            shading[:, j, :, :] = Y1 * l[:, 0] + Y2 * l[:, 1] + Y3 * l[:, 2] + \
                                Y4 * l[:, 3] + Y5 * l[:, 4] + Y6 * l[:, 5] + \
                                Y7 * l[:, 6] + Y8 * l[:, 7] + Y9 * l[:, 8]
        
        return shading
# Base methods for creating convnet
def get_conv(in_channels, out_channels, kernel_size=3, padding=0, stride=1, dropout=0):
    """Conv2d -> BatchNorm2d -> ReLU block, with optional trailing Dropout2d.

    The `dropout` argument was previously accepted but silently ignored;
    it is now honored.  With the default of 0 behavior is unchanged.
    """
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                  padding=padding),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    if dropout > 0:
        layers.append(nn.Dropout2d(p=dropout))
    return nn.Sequential(*layers)
# SfSNet Models
class ResNetBlock(nn.Module):
    """Pre-activation residual block (BN-ReLU-Conv, twice) with identity skip.

    Used for the Normal and Albedo residual stacks; the identity skip
    requires in_planes == out_planes.
    """
    def __init__(self, in_planes, out_planes, stride=1):
        super(ResNetBlock, self).__init__()
        self.res = nn.Sequential(
            nn.BatchNorm2d(in_planes),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_planes, in_planes, 3, stride=1, padding=1),
            nn.BatchNorm2d(in_planes),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_planes, out_planes, 3, stride=1, padding=1)
        )

    def forward(self, x):
        # identity skip connection around the residual branch
        return x + self.res(x)
class baseFeaturesExtractions(nn.Module):
    """Shared feature extractor: RGB -> 64 -> 128 channels, then a strided
    conv that halves the spatial resolution."""
    def __init__(self):
        super(baseFeaturesExtractions, self).__init__()
        self.conv1 = get_conv(3, 64, kernel_size=7, padding=3)
        self.conv2 = get_conv(64, 128, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        # conv3 downsamples by 2 via its stride
        return self.conv3(self.conv2(self.conv1(x)))
class NormalResidualBlock(nn.Module):
    """Five stacked residual blocks refining shared features into
    normal-specific features, finished with BN + ReLU."""
    def __init__(self):
        super(NormalResidualBlock, self).__init__()
        self.block1 = ResNetBlock(128, 128)
        self.block2 = ResNetBlock(128, 128)
        self.block3 = ResNetBlock(128, 128)
        self.block4 = ResNetBlock(128, 128)
        self.block5 = ResNetBlock(128, 128)
        self.bn1 = nn.BatchNorm2d(128)

    def forward(self, x):
        out = x
        for blk in (self.block1, self.block2, self.block3, self.block4, self.block5):
            out = blk(out)
        return F.relu(self.bn1(out))
class AlbedoResidualBlock(nn.Module):
    """Five stacked residual blocks refining shared features into
    albedo-specific features, finished with BN + ReLU."""
    def __init__(self):
        super(AlbedoResidualBlock, self).__init__()
        self.block1 = ResNetBlock(128, 128)
        self.block2 = ResNetBlock(128, 128)
        self.block3 = ResNetBlock(128, 128)
        self.block4 = ResNetBlock(128, 128)
        self.block5 = ResNetBlock(128, 128)
        self.bn1 = nn.BatchNorm2d(128)

    def forward(self, x):
        out = x
        for blk in (self.block1, self.block2, self.block3, self.block4, self.block5):
            out = blk(out)
        return F.relu(self.bn1(out))
class NormalGenerationNet(nn.Module):
    """Decode normal features: 2x bilinear upsample, two conv blocks, then a
    1x1 conv down to 3 output channels."""
    def __init__(self):
        super(NormalGenerationNet, self).__init__()
        # self.upsample = nn.UpsamplingBilinear2d(size=(128, 128), scale_factor=2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
        self.conv1 = get_conv(128, 128, kernel_size=1, stride=1)
        self.conv2 = get_conv(128, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 3, kernel_size=1)

    def forward(self, x):
        out = self.conv1(self.upsample(x))
        return self.conv3(self.conv2(out))
class AlbedoGenerationNet(nn.Module):
    """Decode albedo features: 2x bilinear upsample, two conv blocks, then a
    1x1 conv down to 3 output channels."""
    def __init__(self):
        super(AlbedoGenerationNet, self).__init__()
        # self.upsample = nn.UpsamplingBilinear2d(size=(128, 128), scale_factor=2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
        self.conv1 = get_conv(128, 128, kernel_size=1, stride=1)
        self.conv2 = get_conv(128, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 3, kernel_size=1)

    def forward(self, x):
        out = self.conv1(self.upsample(x))
        return self.conv3(self.conv2(out))
class LightEstimator(nn.Module):
    """Regress 27-dim SH lighting (9 coefficients per RGB channel) from the
    concatenated base + normal + albedo feature maps (384 channels)."""
    def __init__(self):
        super(LightEstimator, self).__init__()
        self.conv1 = get_conv(384, 128, kernel_size=1, stride=1)
        self.pool = nn.AvgPool2d(64, stride=1, padding=0)
        self.fc = nn.Linear(128, 27)

    def forward(self, x):
        pooled = self.pool(self.conv1(x))
        # flatten to (batch, 128) before the linear head
        return self.fc(pooled.view(-1, 128))
def reconstruct_image(shading, albedo):
    """Compose a face image as the element-wise product of shading and albedo."""
    return shading * albedo
class SfsNetPipeline(nn.Module):
    """ SfSNet Pipeline

    End-to-end decomposition of a face image into normal, albedo and SH
    lighting, recombined into shading and a reconstructed face.
    """
    def __init__(self):
        super(SfsNetPipeline, self).__init__()

        self.conv_model             = baseFeaturesExtractions()
        self.normal_residual_model  = NormalResidualBlock()
        self.normal_gen_model       = NormalGenerationNet()
        self.albedo_residual_model  = AlbedoResidualBlock()
        self.albedo_gen_model       = AlbedoGenerationNet()
        self.light_estimator_model  = LightEstimator()

    def get_face(self, sh, normal, albedo):
        """Render a face from given SH lighting, normal map and albedo."""
        shading = get_shading(normal, sh)
        recon   = reconstruct_image(shading, albedo)
        return recon

    def forward(self, face):
        """Decompose `face`; returns (normal, albedo, sh, shading, recon)."""
        # Following is training pipeline
        # 1. Pass Image from Conv Model to extract features
        out_features = self.conv_model(face)

        # 2 a. Pass Conv features through Normal Residual
        out_normal_features = self.normal_residual_model(out_features)
        # 2 b. Pass Conv features through Albedo Residual
        out_albedo_features = self.albedo_residual_model(out_features)
        
        # 3 a. Generate Normal
        predicted_normal = self.normal_gen_model(out_normal_features)
        # 3 b. Generate Albedo
        predicted_albedo = self.albedo_gen_model(out_albedo_features)
        # 3 c. Estimate lighting
        # First, concat conv, normal and albedo features over channels dimension
        all_features = torch.cat((out_features, out_normal_features, out_albedo_features), dim=1)
        # Predict SH
        predicted_sh = self.light_estimator_model(all_features)

        # 4. Generate shading
        out_shading = get_shading(predicted_normal, predicted_sh)

        # 5. Reconstruction of image
        out_recon = reconstruct_image(out_shading, predicted_albedo)

        return predicted_normal, predicted_albedo, predicted_sh, out_shading, out_recon

    def fix_weights(self):
        """Freeze everything except the albedo generator (used after loading
        pretrained weights for mix training)."""
        dfs_freeze(self.conv_model)
        dfs_freeze(self.normal_residual_model)
        dfs_freeze(self.normal_gen_model)
        dfs_freeze(self.albedo_residual_model)
        dfs_freeze(self.light_estimator_model)
        # Note that we are not freezing Albedo gen model
# Use following to fix weights of the model
# Ref - https://discuss.pytorch.org/t/how-the-pytorch-freeze-network-in-some-layers-only-the-rest-of-the-training/7088/15
# Use following to fix weights of the model
# Ref - https://discuss.pytorch.org/t/how-the-pytorch-freeze-network-in-some-layers-only-the-rest-of-the-training/7088/15
def dfs_freeze(model):
    """Freeze *model*: set requires_grad=False on every parameter.

    Bug fix: the previous version walked `named_children()` only, so
    parameters owned directly by `model` itself (not by a child module)
    were never frozen. `Module.parameters()` already recurses over all
    submodules, so a single loop covers everything.
    """
    for param in model.parameters():
        param.requires_grad = False
# Following method loads author provided model weights
# Refer to model_loading_synchronization to getf following mapping
# Following mapping is auto-generated using script
def load_model_from_pretrained(src_model, dst_model):
    """Copy author-provided SfSNet weights into our model's state dict.

    Replaces 178 hand-written assignments with a structured mapping: the
    source checkpoint uses short names (conv1, nres1, aconv1, lconv, ...)
    while our model nests them under conv_model / *_residual_model /
    *_gen_model / light_estimator_model. Mutates and returns `dst_model`.
    """
    _BN_KEYS = ('weight', 'bias', 'running_mean', 'running_var')
    _AFFINE_KEYS = ('weight', 'bias')

    def copy(dst_prefix, src_prefix, keys):
        # Copy the listed tensors from src to dst under the new prefix.
        for k in keys:
            dst_model[dst_prefix + '.' + k] = src_model[src_prefix + '.' + k]

    def copy_conv_bn(dst_prefix, src_prefix):
        # A conv block is Conv2d at index .0 followed by BatchNorm at .1.
        copy(dst_prefix + '.0', src_prefix + '.0', _AFFINE_KEYS)
        copy(dst_prefix + '.1', src_prefix + '.1', _BN_KEYS)

    def copy_res(dst_prefix, src_prefix):
        # A residual block is BN (.0), Conv (.2), BN (.3), Conv (.5) inside `.res`.
        copy(dst_prefix + '.res.0', src_prefix + '.res.0', _BN_KEYS)
        copy(dst_prefix + '.res.2', src_prefix + '.res.2', _AFFINE_KEYS)
        copy(dst_prefix + '.res.3', src_prefix + '.res.3', _BN_KEYS)
        copy(dst_prefix + '.res.5', src_prefix + '.res.5', _AFFINE_KEYS)

    # Shared feature extractor.
    copy_conv_bn('conv_model.conv1', 'conv1.conv')
    copy_conv_bn('conv_model.conv2', 'conv2.conv')
    copy('conv_model.conv3', 'conv3', _AFFINE_KEYS)

    # Normal ('n' prefix in source) and albedo ('a' prefix) branches are symmetric.
    for branch, s in (('normal', 'n'), ('albedo', 'a')):
        for i in range(1, 6):
            copy_res('{}_residual_model.block{}'.format(branch, i),
                     '{}res{}'.format(s, i))
        copy('{}_residual_model.bn1'.format(branch), '{}reso.0'.format(s), _BN_KEYS)
        copy_conv_bn('{}_gen_model.conv1'.format(branch), '{}conv1.conv'.format(s))
        copy_conv_bn('{}_gen_model.conv2'.format(branch), '{}conv2.conv'.format(s))
        copy('{}_gen_model.conv3'.format(branch), '{}out'.format(s), _AFFINE_KEYS)

    # Light estimator.
    copy_conv_bn('light_estimator_model.conv1', 'lconv.conv')
    copy('light_estimator_model.fc', 'lout', _AFFINE_KEYS)

    return dst_model
#### FOLLOWING IS SKIP NET IMPLEMENTATION
# Base methods for creating convnet
def get_skipnet_conv(in_channels, out_channels, kernel_size=3, padding=0, stride=1):
    """Conv2d -> BatchNorm2d -> LeakyReLU(0.2) building block for SkipNet."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                  stride=stride, padding=padding),
        nn.BatchNorm2d(out_channels),
        nn.LeakyReLU(0.2),
    ]
    return nn.Sequential(*layers)
def get_skipnet_deconv(in_channels, out_channels, kernel_size=3, padding=0, stride=1):
    """ConvTranspose2d -> BatchNorm2d -> LeakyReLU(0.2) upsampling block for SkipNet."""
    layers = [
        nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size,
                           stride=stride, padding=padding),
        nn.BatchNorm2d(out_channels),
        nn.LeakyReLU(0.2),
    ]
    return nn.Sequential(*layers)
class SkipNet_Encoder(nn.Module):
    """Convolutional encoder: five stride-2 stages, each halving resolution.

    Returns the 256-d latent code plus the four intermediate activations,
    which the decoder consumes as skip connections.
    """
    def __init__(self):
        super(SkipNet_Encoder, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1)
        self.conv2 = get_skipnet_conv(64, 128, kernel_size=4, stride=2, padding=1)
        self.conv3 = get_skipnet_conv(128, 256, kernel_size=4, stride=2, padding=1)
        self.conv4 = get_skipnet_conv(256, 256, kernel_size=4, stride=2, padding=1)
        self.conv5 = get_skipnet_conv(256, 256, kernel_size=4, stride=2, padding=1)
        # 4096 = 256 channels * 4 * 4 — assumes a 128x128 input image (TODO confirm).
        self.fc256 = nn.Linear(4096, 256)

    def get_face(self, sh, normal, albedo):
        """Render a face from SH lighting, a normal map and an albedo map."""
        shading = get_shading(normal, sh)
        return reconstruct_image(shading, albedo)

    def forward(self, x):
        skip1 = self.conv1(x)
        skip2 = self.conv2(skip1)
        skip3 = self.conv3(skip2)
        skip4 = self.conv4(skip3)
        bottom = self.conv5(skip4)
        # Flatten the bottleneck and project to the 256-d latent code.
        latent = self.fc256(bottom.view(bottom.shape[0], -1))
        return latent, skip1, skip2, skip3, skip4
class SkipNet_Decoder(nn.Module):
    """Deconvolutional decoder mirroring SkipNet_Encoder.

    Each of the first four upsampling stages adds the matching encoder
    activation (skip connection, deepest first); a final 1x1 conv maps
    down to 3 channels.
    """
    def __init__(self):
        super(SkipNet_Decoder, self).__init__()
        self.dconv1 = get_skipnet_deconv(256, 256, kernel_size=4, stride=2, padding=1)
        self.dconv2 = get_skipnet_deconv(256, 256, kernel_size=4, stride=2, padding=1)
        self.dconv3 = get_skipnet_deconv(256, 128, kernel_size=4, stride=2, padding=1)
        self.dconv4 = get_skipnet_deconv(128, 64, kernel_size=4, stride=2, padding=1)
        self.dconv5 = get_skipnet_deconv(64, 64, kernel_size=4, stride=2, padding=1)
        self.conv6 = nn.Conv2d(64, 3, kernel_size=1, stride=1)

    def forward(self, x, out_1, out_2, out_3, out_4):
        stages = (self.dconv1, self.dconv2, self.dconv3, self.dconv4)
        skips = (out_4, out_3, out_2, out_1)  # deepest encoder activation first
        out = x
        for stage, skip in zip(stages, skips):
            out = stage(out) + skip
        out = self.dconv5(out)
        return self.conv6(out)
class SkipNet(nn.Module):
    """Skip-connection variant of SfSNet: a shared encoder, separate
    normal/albedo decoders, and a linear lighting head on the latent code."""
    def __init__(self):
        super(SkipNet, self).__init__()
        self.encoder = SkipNet_Encoder()
        self.normal_mlp = nn.Upsample(scale_factor=4, mode='bilinear')
        self.albedo_mlp = nn.Upsample(scale_factor=4, mode='bilinear')
        self.light_decoder = nn.Linear(256, 27)
        self.normal_decoder = SkipNet_Decoder()
        self.albedo_decoder = SkipNet_Decoder()

    def get_face(self, sh, normal, albedo):
        """Render a face from SH lighting, a normal map and an albedo map."""
        shading = get_shading(normal, sh)
        return reconstruct_image(shading, albedo)

    def forward(self, x):
        latent, skip_1, skip_2, skip_3, skip_4 = self.encoder(x)
        # Lift the latent vector to a (B, 256, 1, 1) map so it can be upsampled
        # into the spatial seed for each decoder.
        spatial = latent.unsqueeze(2).unsqueeze(3)
        normal_seed = self.normal_mlp(spatial)
        albedo_seed = self.albedo_mlp(spatial)
        light = self.light_decoder(latent)
        normal = self.normal_decoder(normal_seed, skip_1, skip_2, skip_3, skip_4)
        albedo = self.albedo_decoder(albedo_seed, skip_1, skip_2, skip_3, skip_4)
        # Re-render shading and the reconstructed face from the predictions.
        shading = get_shading(normal, light)
        recon = reconstruct_image(shading, albedo)
        return normal, albedo, light, shading, recon
| 31,228 | 50.279146 | 121 | py |
SfSNet-PyTorch | SfSNet-PyTorch-master/shading.py | import torch
import torchvision
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms
import pandas as pd
from utils import *
from models import sfsNetShading
# def var(x):
# if torch.cuda.is_available():
# x = x.cuda()
# return Variable(x)
# Start of log based shading generation method
# Credits: Zhixin Shu for providing following method
class waspShadeRenderer(nn.Module):
    """Render a single-channel shading map from SH lighting and surface normals.

    Uses the quadratic-form irradiance formulation: for each pixel with
    homogeneous normal n (4-vector), shading = n^T M n, where M is the 4x4
    matrix built by MMatrix from 9 spherical-harmonics coefficients.
    Credits: Zhixin Shu (see module comment above).
    """
    def __init__(self, opt):
        super(waspShadeRenderer, self).__init__()
        self.opt = opt
        self.getHomo = HomogeneousCoord(opt)  # appends a channel of ones to the normals
        self.getMMatrix = MMatrix(opt)        # 9 SH coefficients -> 4x4 matrix M
    def forward(self, light, normals):
        # light: (batchSize, 9) SH coefficients (see MMatrix);
        # normals: (batchSize, 3, W, H) normal map.
        # homogeneous coordinate of the normals
        #normals = var(normals).type(torch.DoubleTensor)
        batchSize = normals.size(0)
        W = normals.size(2)
        H = normals.size(3)
        hNormals = self.getHomo(normals)  # (batchSize, 4, W, H)
        # matrix for light
        mLight = self.getMMatrix(light)  # (batchSize, 4, 4)
        # get shading from these two: N x 4 , N = batchSize x W x H
        # Flatten every pixel's homogeneous normal into one row of an (N, 4) matrix.
        hN_vec = hNormals.view(batchSize, 4, -1).permute(0,2,1).contiguous().view(-1,4)
        # N x 1 x 4
        hN_vec_Left = hN_vec.unsqueeze(1)
        # N x 4 x 1
        hN_vec_Right = hN_vec.unsqueeze(2)
        # expand the lighting from batchSize x 4 x 4 to N x 4 x 4
        # (each image's M is repeated once per pixel; cast to float32 for matmul)
        hL = mLight.view(batchSize,16).repeat(1,W*H).view(-1,4,4).type(torch.float)
        # Per-pixel quadratic form: (1x4) @ (4x4) @ (4x1) -> scalar shading value.
        shade0 = torch.matmul(hN_vec_Left, hL)
        shade1 = torch.matmul(shade0, hN_vec_Right)
        #shade1 is tensor of size Nx1x1 = batchSize x W x H
        shading = shade1.view(batchSize,W,H).unsqueeze(1)  # (batchSize, 1, W, H)
        return shading
class HomogeneousCoord(nn.Module):
    """Append a constant channel of ones, turning (B, C, W, H) normals into
    homogeneous coordinates of shape (B, C+1, W, H)."""
    def __init__(self, opt):
        super(HomogeneousCoord, self).__init__()
        self.opt = opt

    def forward(self, x):
        ones = torch.ones(x.size(0), 1, x.size(2), x.size(3))
        return torch.cat((x, ones), 1)
class MMatrix(nn.Module):
    """Build the symmetric 4x4 irradiance matrix M from 9 SH coefficients
    (Ramamoorthi-Hanrahan formulation), so that shading = n^T M n for a
    homogeneous normal n.

    Input L: [batchSize, 9]; output M: [batchSize, 4, 4].
    """
    def __init__(self, opt):
        super(MMatrix, self).__init__()
        self.opt = opt

    def forward(self, L):
        # Constants of the SH irradiance quadratic form.
        c1 = 0.429043
        c2 = 0.511664
        c3 = 0.743152
        c4 = 0.886227
        c5 = 0.247708

        def sh(i):
            # i-th SH coefficient as a (batch,) vector.
            return L[:, i]

        row0 = torch.stack((c1 * sh(8), c1 * sh(4), c1 * sh(7), c2 * sh(3)), dim=1)
        row1 = torch.stack((c1 * sh(4), -c1 * sh(8), c1 * sh(5), c2 * sh(1)), dim=1)
        row2 = torch.stack((c1 * sh(7), c1 * sh(5), c3 * sh(6), c2 * sh(2)), dim=1)
        row3 = torch.stack((c2 * sh(3), c2 * sh(1), c2 * sh(2),
                            c4 * sh(0) - c5 * sh(6)), dim=1)
        return torch.stack((row0, row1, row2, row3), dim=1)
# End of log based shading generation method
def getShadingFromNormalAndSH(Normal, rSH):
    """Render 3-channel shading from a normal map and 27 SH coefficients
    (9 per color channel) using the matrix-form renderer."""
    renderer = waspShadeRenderer(None)
    # Drop the middle singleton dimension — presumably (B, 1, 27) -> (B, 27).
    rSH = rSH.view(rSH.shape[0], rSH.shape[2])
    # One 9-coefficient SH vector per color channel, rendered independently.
    per_channel = [renderer(sh_c, Normal) for sh_c in torch.split(rSH, 9, dim=1)]
    return torch.cat(per_channel, 1)
def validate_shading_method(train_dl):
    """Debug harness: compare our matrix-form shading renderer against the
    sfsNetShading layer on one batch, saving all intermediate images and
    printing the masked L1 reconstruction error of each method.

    Fixes: removed extraction garbage fused onto the final line, the
    duplicate nn.L1Loss() construction, and the misleading `mseLoss` name
    (it is an L1 loss).
    """
    albedo, normal, mask, sh, face = next(iter(train_dl))

    # --- Shading via our matrix-form renderer ---
    shading = getShadingFromNormalAndSH(normal, sh)
    save_image(albedo, denormalize=False, mask=mask, path='./results/shading_from_normal/albedo.png')
    save_image(normal, denormalize=False, mask=mask, path='./results/shading_from_normal/normal.png')
    save_image(shading, denormalize=False, mask=mask, path='./results/shading_from_normal/shading_ours.png')
    recon = shading * albedo
    save_image(recon, mask=mask, denormalize=False, path='./results/shading_from_normal/recon_ours.png')
    save_image(face, mask=mask, path='./results/shading_from_normal/recon_groundtruth.png')
    recon = applyMask(recon, mask)
    face = applyMask(face, mask)
    l1_loss = nn.L1Loss()
    print('L1Loss Ours: ', l1_loss(face, recon).item())

    # --- Shading via the SfSNet shading layer, for comparison ---
    sfsnet_shading_net = sfsNetShading()
    sh = sh.view(sh.shape[0], sh.shape[2])
    sfs_shading = sfsnet_shading_net(normal, sh)
    save_image(sfs_shading, mask=mask, denormalize=False, path='./results/shading_from_normal/shading_sfsnet.png')
    recon = sfs_shading * albedo
    save_image(recon, mask=mask, denormalize=False, path='./results/shading_from_normal/recon_sfsnet.png')
    recon = applyMask(recon, mask)
    face = applyMask(face, mask)
    print('L1Loss SFSNet: ', l1_loss(face, recon).item())
SfSNet-PyTorch | SfSNet-PyTorch-master/train.py | import torch
import torch.nn as nn
import numpy as np
import os
from models import *
from utils import *
from data_loading import *
## TODOS:
## 1. Dump SH in file
##
##
## Notes:
## 1. SH is not normalized
## 2. Face is normalized and denormalized - shall we not normalize in the first place?
# Flag to enable experiment logging (metrics and images) to Weights & Biases.
WANDB_ENABLE = True
def predict_celeba(sfs_net_model, dl, train_epoch_num = 0,
                   use_cuda = False, out_folder = None, wandb = None, suffix = 'CelebA_Val', dump_all_images = False):
    """Run the SfSNet pipeline over unlabeled CelebA faces.

    CelebA has no ground-truth decompositions, so only the L1 reconstruction
    loss is computed. Predictions from the first batch (or every batch when
    `dump_all_images` is set) are dumped as images.

    Returns the average reconstruction loss over `dl` (0.0 for an empty loader).

    Fixes: `wandb` defaults to None but `wandb.log` was called
    unconditionally (crash with default args) — now guarded; removed the
    unused `rloss` accumulator; guarded against an empty dataloader.
    """
    # Batch index whose predictions are dumped for inspection.
    fix_bix_dump = 0

    recon_loss = nn.L1Loss()
    if use_cuda:
        recon_loss = recon_loss.cuda()

    tloss = 0  # accumulated reconstruction loss
    for bix, face in enumerate(dl):
        if use_cuda:
            face = face.cuda()
        # predicted_face == reconstruction
        predicted_normal, predicted_albedo, predicted_sh, predicted_shading, predicted_face = sfs_net_model(face)
        if bix == fix_bix_dump or dump_all_images:
            # save predictions in log folder
            file_name = out_folder + suffix + '_' + str(train_epoch_num) + '_' + str(bix)
            # Rescale normals into displayable range before logging.
            predicted_normal = get_normal_in_range(predicted_normal)
            wandb_log_images(wandb, predicted_normal, None, suffix+' Predicted Normal', train_epoch_num, suffix+' Predicted Normal', path=file_name + '_predicted_normal.png')
            wandb_log_images(wandb, predicted_albedo, None, suffix +' Predicted Albedo', train_epoch_num, suffix+' Predicted Albedo', path=file_name + '_predicted_albedo.png')
            wandb_log_images(wandb, predicted_shading, None, suffix+' Predicted Shading', train_epoch_num, suffix+' Predicted Shading', path=file_name + '_predicted_shading.png', denormalize=False)
            wandb_log_images(wandb, predicted_face, None, suffix+' Predicted face', train_epoch_num, suffix+' Predicted face', path=file_name + '_predicted_face.png', denormalize=False)
            wandb_log_images(wandb, face, None, suffix+' Ground Truth', train_epoch_num, suffix+' Ground Truth', path=file_name + '_gt_face.png')
            # TODO: dump predicted SH as CSV or TXT file
        # Reconstruction loss is the only supervision available for CelebA.
        total_loss = recon_loss(predicted_face, face)
        tloss += total_loss.item()

    len_dl = len(dl)
    if len_dl == 0:
        return 0.0
    if wandb is not None:
        wandb.log({suffix+' Total loss': tloss/len_dl}, step=train_epoch_num)

    # return average loss over dataset
    return tloss / len_dl
def predict_sfsnet(sfs_net_model, dl, train_epoch_num = 0,
                   use_cuda = False, out_folder = None, wandb = None, suffix = 'Val'):
    """Evaluate SfSNet on a labeled dataset of (albedo, normal, mask, sh, face).

    Accumulates L1 losses on normal/albedo/reconstruction, MSE on SH, and a
    weighted total; dumps the first batch's predictions as images. Returns
    the averaged (total, normal, albedo, sh, recon) losses.

    Fixes: `wandb` defaults to None but `wandb.log` was called
    unconditionally — now guarded; log keys hard-coded a 'Val' prefix even
    though `suffix` is a parameter (mislabeled logs for other suffixes) —
    now all keys use `suffix`; guarded against an empty dataloader.
    """
    # Batch index whose predictions are dumped as images for inspection.
    fix_bix_dump = 0

    normal_loss = nn.L1Loss()
    albedo_loss = nn.L1Loss()
    sh_loss     = nn.MSELoss()
    recon_loss  = nn.L1Loss()

    # Weights of the combined objective.
    lamda_recon  = 0.5
    lamda_albedo = 0.5
    lamda_normal = 0.5
    lamda_sh     = 0.1

    if use_cuda:
        normal_loss = normal_loss.cuda()
        albedo_loss = albedo_loss.cuda()
        sh_loss     = sh_loss.cuda()
        recon_loss  = recon_loss.cuda()

    tloss  = 0  # Total loss
    nloss  = 0  # Normal loss
    aloss  = 0  # Albedo loss
    shloss = 0  # SH loss
    rloss  = 0  # Reconstruction loss

    for bix, data in enumerate(dl):
        albedo, normal, mask, sh, face = data
        if use_cuda:
            albedo = albedo.cuda()
            normal = normal.cuda()
            mask   = mask.cuda()
            sh     = sh.cuda()
            face   = face.cuda()

        # predicted_face == reconstruction
        predicted_normal, predicted_albedo, predicted_sh, predicted_shading, predicted_face = sfs_net_model(face)

        if bix == fix_bix_dump:
            # save predictions in log folder
            file_name = out_folder + suffix + '_' + str(train_epoch_num) + '_' + str(fix_bix_dump)
            # Rescale normals into displayable range before logging.
            save_p_normal = get_normal_in_range(predicted_normal)
            save_gt_normal = get_normal_in_range(normal)
            wandb_log_images(wandb, save_p_normal, mask, suffix+' Predicted Normal', train_epoch_num, suffix+' Predicted Normal', path=file_name + '_predicted_normal.png')
            wandb_log_images(wandb, predicted_albedo, mask, suffix +' Predicted Albedo', train_epoch_num, suffix+' Predicted Albedo', path=file_name + '_predicted_albedo.png')
            wandb_log_images(wandb, predicted_shading, mask, suffix+' Predicted Shading', train_epoch_num, suffix+' Predicted Shading', path=file_name + '_predicted_shading.png', denormalize=False)
            wandb_log_images(wandb, predicted_face, mask, suffix+' Predicted face', train_epoch_num, suffix+' Predicted face', path=file_name + '_predicted_face.png', denormalize=False)
            wandb_log_images(wandb, face, mask, suffix+' Ground Truth', train_epoch_num, suffix+' Ground Truth', path=file_name + '_gt_face.png')
            wandb_log_images(wandb, save_gt_normal, mask, suffix+' Ground Truth Normal', train_epoch_num, suffix+' Ground Normal', path=file_name + '_gt_normal.png')
            wandb_log_images(wandb, albedo, mask, suffix+' Ground Truth Albedo', train_epoch_num, suffix+' Ground Albedo', path=file_name + '_gt_albedo.png')
            # Re-render faces with the ground-truth SH for comparison.
            real_sh_face = sfs_net_model.get_face(sh, predicted_normal, predicted_albedo)
            wandb_log_images(wandb, real_sh_face, mask, suffix+' Real SH Predicted Face', train_epoch_num, suffix+' Real SH Predicted Face', path=file_name + '_real_sh_face.png')
            syn_face = sfs_net_model.get_face(sh, normal, albedo)
            wandb_log_images(wandb, syn_face, mask, suffix+' Real SH GT Face', train_epoch_num, suffix+' Real SH GT Face', path=file_name + '_syn_gt_face.png')
            # TODO: dump predicted SH as CSV or TXT file

        # Individual losses.
        current_normal_loss = normal_loss(predicted_normal, normal)
        current_albedo_loss = albedo_loss(predicted_albedo, albedo)
        current_sh_loss     = sh_loss(predicted_sh, sh)
        current_recon_loss  = recon_loss(predicted_face, face)

        total_loss = lamda_recon * current_recon_loss + lamda_normal * current_normal_loss \
                     + lamda_albedo * current_albedo_loss + lamda_sh * current_sh_loss

        # Logging for display and debugging purposes.
        tloss  += total_loss.item()
        nloss  += current_normal_loss.item()
        aloss  += current_albedo_loss.item()
        shloss += current_sh_loss.item()
        rloss  += current_recon_loss.item()

    len_dl = len(dl)
    if len_dl == 0:
        return 0.0, 0.0, 0.0, 0.0, 0.0
    if wandb is not None:
        wandb.log({suffix+' Total loss': tloss/len_dl, suffix+' Albedo loss': aloss/len_dl,
                   suffix+' Normal loss': nloss/len_dl, suffix+' SH loss': shloss/len_dl,
                   suffix+' Recon loss': rloss/len_dl}, step=train_epoch_num)

    # return average loss over dataset
    return tloss / len_dl, nloss / len_dl, aloss / len_dl, shloss / len_dl, rloss / len_dl
def train_synthetic(sfs_net_model, syn_data, celeba_data=None, read_first=None,
          batch_size = 10, num_epochs = 10, log_path = './results/metadata/', use_cuda=False, wandb=None,
          lr = 0.01, wt_decay=0.005, training_syn=False):
    """Pretrain SfSNet on the synthetic dataset using MSE losses.

    Differences from `train`: all four losses are MSE with weight 1, the
    reconstruction loss is logged but deliberately excluded from the optimized
    objective, and the checkpoint is saved as 'skipnet_model.pkl'.

    Args:
        sfs_net_model: model exposing forward(face) ->
            (normal, albedo, sh, shading, recon) and get_face(sh, normal, albedo).
        syn_data: synthetic-dataset root (expects train.csv/test.csv plus
            train/ and test/ sub-directories).
        celeba_data: optional CelebA root; its CSVs are mixed into the synthetic
            loaders and, when `training_syn` is set, a small CelebA validation
            loader is built as well.
        read_first: cap on the number of training rows read from the CSV.
        batch_size, num_epochs, lr, wt_decay: training hyper-parameters.
        log_path: root directory for checkpoints and dumped images.
        use_cuda: move tensors and loss modules to GPU when True.
        wandb: wandb module/run used for metric and image logging (required).
        training_syn: forwarded to the dataset helpers; also gates the CelebA
            validation loader.
    """
    # ---- Data: build train/val/test loaders from the CSV listings ----
    syn_train_csv = syn_data + '/train.csv'
    syn_test_csv  = syn_data + '/test.csv'

    celeba_train_csv = None
    celeba_test_csv = None
    val_celeba_dl = None
    if celeba_data is not None:
        celeba_train_csv = celeba_data + '/train.csv'
        celeba_test_csv = celeba_data + '/test.csv'
        if training_syn:
            # Tiny CelebA loader (one batch) used purely for visual validation.
            celeba_dt, _ = get_celeba_dataset(read_from_csv=celeba_train_csv, read_first=batch_size, validation_split=0)
            val_celeba_dl = DataLoader(celeba_dt, batch_size=batch_size, shuffle=True)

    # Load Synthetic dataset
    train_dataset, val_dataset = get_sfsnet_dataset(syn_dir=syn_data+'train/', read_from_csv=syn_train_csv, read_celeba_csv=celeba_train_csv, read_first=read_first, validation_split=2, training_syn = training_syn)
    test_dataset, _ = get_sfsnet_dataset(syn_dir=syn_data+'test/', read_from_csv=syn_test_csv, read_celeba_csv=celeba_test_csv, read_first=100, validation_split=0, training_syn = training_syn)

    syn_train_dl = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    syn_val_dl   = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
    syn_test_dl  = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    print('Synthetic dataset: Train data: ', len(syn_train_dl), ' Val data: ', len(syn_val_dl), ' Test data: ', len(syn_test_dl))

    # ---- Output directories ----
    model_checkpoint_dir = log_path + 'checkpoints/'
    out_images_dir = log_path + 'out_images/'
    out_syn_images_dir = out_images_dir

    os.system('mkdir -p {}'.format(model_checkpoint_dir))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'train/'))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'val/'))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'test/'))
    if val_celeba_dl is not None:
        os.system('mkdir -p {}'.format(out_syn_images_dir + 'celeba_val/'))

    # ---- Optimizer and losses ----
    model_parameters = sfs_net_model.parameters()
    optimizer = torch.optim.Adam(model_parameters, lr=lr, weight_decay=wt_decay)

    normal_loss = nn.MSELoss()
    albedo_loss = nn.MSELoss()
    sh_loss     = nn.MSELoss()
    recon_loss  = nn.MSELoss()

    # NOTE(review): the original code moved the loss modules to GPU in two
    # identical blocks; the redundant second block was removed (no behavior change).
    if use_cuda:
        normal_loss = normal_loss.cuda()
        albedo_loss = albedo_loss.cuda()
        sh_loss     = sh_loss.cuda()
        recon_loss  = recon_loss.cuda()

    # Loss weights (all equal during synthetic pretraining).
    lamda_recon  = 1
    lamda_albedo = 1
    lamda_normal = 1
    lamda_sh     = 1

    syn_train_len = len(syn_train_dl)
    for epoch in range(1, num_epochs+1):
        tloss = 0   # Total loss
        nloss = 0   # Normal loss
        aloss = 0   # Albedo loss
        shloss = 0  # SH loss
        rloss = 0   # Reconstruction loss

        for bix, data in enumerate(syn_train_dl):
            albedo, normal, mask, sh, face = data
            if use_cuda:
                albedo = albedo.cuda()
                normal = normal.cuda()
                mask   = mask.cuda()
                sh     = sh.cuda()
                face   = face.cuda()

            # Apply Mask on input image
            # face = applyMask(face, mask)
            predicted_normal, predicted_albedo, predicted_sh, out_shading, out_recon = sfs_net_model(face)

            # Loss computation
            current_normal_loss = normal_loss(predicted_normal, normal)
            current_albedo_loss = albedo_loss(predicted_albedo, albedo)
            current_sh_loss     = sh_loss(predicted_sh, sh)
            current_recon_loss  = recon_loss(out_recon, face)

            # Reconstruction loss is tracked for logging but intentionally
            # left out of the optimized objective in this pretraining stage.
            total_loss = lamda_normal * current_normal_loss \
                            + lamda_albedo * current_albedo_loss + lamda_sh * current_sh_loss # + lamda_recon * current_recon_loss

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            # Logging for display and debugging purposes
            tloss  += total_loss.item()
            nloss  += current_normal_loss.item()
            aloss  += current_albedo_loss.item()
            shloss += current_sh_loss.item()
            rloss  += current_recon_loss.item()

        print('Epoch: {} - Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}'.format(epoch, tloss, \
                   nloss, aloss, shloss, rloss))

        log_prefix = 'Syn Data'
        if celeba_data is not None:
            log_prefix = 'Mix Data '

        if epoch % 1 == 0:
            print('Training set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}'.format(tloss / syn_train_len, \
                   nloss / syn_train_len, aloss / syn_train_len, shloss / syn_train_len, rloss / syn_train_len))

            # Log training info
            wandb.log({log_prefix + 'Train Total loss': tloss/syn_train_len, log_prefix + 'Train Albedo loss': aloss/syn_train_len, log_prefix + 'Train Normal loss': nloss/syn_train_len, \
                        log_prefix + 'Train SH loss': shloss/syn_train_len, log_prefix + 'Train Recon loss': rloss/syn_train_len})

            # Log images in wandb (uses the last batch of the epoch).
            file_name = out_syn_images_dir + 'train/' + 'train_' + str(epoch)
            save_p_normal = get_normal_in_range(predicted_normal)
            save_gt_normal = get_normal_in_range(normal)
            wandb_log_images(wandb, save_p_normal, mask, 'Train Predicted Normal', epoch, 'Train Predicted Normal', path=file_name + '_predicted_normal.png')
            wandb_log_images(wandb, predicted_albedo, mask, 'Train Predicted Albedo', epoch, 'Train Predicted Albedo', path=file_name + '_predicted_albedo.png')
            wandb_log_images(wandb, out_shading, mask, 'Train Predicted Shading', epoch, 'Train Predicted Shading', path=file_name + '_predicted_shading.png', denormalize=False)
            wandb_log_images(wandb, out_recon, mask, 'Train Recon', epoch, 'Train Recon', path=file_name + '_predicted_face.png')
            wandb_log_images(wandb, face, mask, 'Train Ground Truth', epoch, 'Train Ground Truth', path=file_name + '_gt_face.png')
            wandb_log_images(wandb, save_gt_normal, mask, 'Train Ground Truth Normal', epoch, 'Train Ground Truth Normal', path=file_name + '_gt_normal.png')
            wandb_log_images(wandb, albedo, mask, 'Train Ground Truth Albedo', epoch, 'Train Ground Truth Albedo', path=file_name + '_gt_albedo.png')

            # Get face with real_sh, predicted normal and albedo for debugging
            real_sh_face = sfs_net_model.get_face(sh, predicted_normal, predicted_albedo)
            syn_face = sfs_net_model.get_face(sh, normal, albedo)
            wandb_log_images(wandb, real_sh_face, mask, 'Train Real SH Predicted Face', epoch, 'Train Real SH Predicted Face', path=file_name + '_real_sh_face.png')
            wandb_log_images(wandb, syn_face, mask, 'Train Real SH GT Face', epoch, 'Train Real SH GT Face', path=file_name + '_syn_gt_face.png')

            # ---- Validation ----
            v_total, v_normal, v_albedo, v_sh, v_recon = predict_sfsnet(sfs_net_model, syn_val_dl, train_epoch_num=epoch, use_cuda=use_cuda,
                                                                         out_folder=out_syn_images_dir+'/val/', wandb=wandb)
            wandb.log({log_prefix + 'Val Total loss': v_total, log_prefix + 'Val Albedo loss': v_albedo, log_prefix + 'Val Normal loss': v_normal, \
                        log_prefix + 'Val SH loss': v_sh, log_prefix + 'Val Recon loss': v_recon})

            print('Val set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}'.format(v_total,
                   v_normal, v_albedo, v_sh, v_recon))

            if val_celeba_dl is not None:
                predict_celeba(sfs_net_model, val_celeba_dl, train_epoch_num = 0,
                               use_cuda = use_cuda, out_folder = out_syn_images_dir + 'celeba_val/', wandb = wandb, dump_all_images = True)

            # Model saving (overwritten every logging epoch).
            torch.save(sfs_net_model.state_dict(), model_checkpoint_dir + 'skipnet_model.pkl')

        if epoch % 5 == 0:
            t_total, t_normal, t_albedo, t_sh, t_recon = predict_sfsnet(sfs_net_model, syn_test_dl, train_epoch_num=epoch, use_cuda=use_cuda,
                                                                         out_folder=out_syn_images_dir + '/test/', wandb=wandb, suffix='Test')
            wandb.log({log_prefix+'Test Total loss': t_total, log_prefix+'Test Albedo loss': t_albedo, log_prefix+'Test Normal loss': t_normal, \
                        log_prefix+ 'Test SH loss': t_sh, log_prefix+'Test Recon loss': t_recon})

            print('Test-set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}\n'.format(t_total,
                   t_normal, t_albedo, t_sh, t_recon))
def train(sfs_net_model, syn_data, celeba_data=None, read_first=None,
          batch_size = 10, num_epochs = 10, log_path = './results/metadata/', use_cuda=False, wandb=None,
          lr = 0.01, wt_decay=0.005, training_syn=False):
    """Train SfSNet on synthetic (optionally mixed with CelebA) data.

    Differences from `train_synthetic`: normal/albedo/reconstruction use L1
    losses (SH stays MSE), the loss weights are 0.5/0.5/0.5/0.1, the
    reconstruction term IS part of the optimized objective, and the checkpoint
    is saved as 'sfs_net_model.pkl'.

    Args:
        sfs_net_model: model exposing forward(face) ->
            (normal, albedo, sh, shading, recon) and get_face(sh, normal, albedo).
        syn_data: synthetic-dataset root (expects train.csv/test.csv plus
            train/ and test/ sub-directories).
        celeba_data: optional CelebA root; its CSVs are mixed into the loaders
            and, when `training_syn` is set, a small CelebA validation loader
            is built as well.
        read_first: cap on the number of training rows read from the CSV.
        batch_size, num_epochs, lr, wt_decay: training hyper-parameters.
        log_path: root directory for checkpoints and dumped images.
        use_cuda: move tensors and loss modules to GPU when True.
        wandb: wandb module/run used for metric and image logging (required).
        training_syn: forwarded to the dataset helpers; also gates the CelebA
            validation loader.
    """
    # ---- Data: build train/val/test loaders from the CSV listings ----
    syn_train_csv = syn_data + '/train.csv'
    syn_test_csv  = syn_data + '/test.csv'

    celeba_train_csv = None
    celeba_test_csv = None
    val_celeba_dl = None
    if celeba_data is not None:
        celeba_train_csv = celeba_data + '/train.csv'
        celeba_test_csv = celeba_data + '/test.csv'
        if training_syn:
            # Tiny CelebA loader (one batch) used purely for visual validation.
            celeba_dt, _ = get_celeba_dataset(read_from_csv=celeba_train_csv, read_first=batch_size, validation_split=0)
            val_celeba_dl = DataLoader(celeba_dt, batch_size=batch_size, shuffle=True)

    # Load Synthetic dataset
    train_dataset, val_dataset = get_sfsnet_dataset(syn_dir=syn_data+'train/', read_from_csv=syn_train_csv, read_celeba_csv=celeba_train_csv, read_first=read_first, validation_split=2, training_syn = training_syn)
    test_dataset, _ = get_sfsnet_dataset(syn_dir=syn_data+'test/', read_from_csv=syn_test_csv, read_celeba_csv=celeba_test_csv, read_first=100, validation_split=0, training_syn = training_syn)

    syn_train_dl = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    syn_val_dl   = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
    syn_test_dl  = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    print('Synthetic dataset: Train data: ', len(syn_train_dl), ' Val data: ', len(syn_val_dl), ' Test data: ', len(syn_test_dl))

    # ---- Output directories ----
    model_checkpoint_dir = log_path + 'checkpoints/'
    out_images_dir = log_path + 'out_images/'
    out_syn_images_dir = out_images_dir

    os.system('mkdir -p {}'.format(model_checkpoint_dir))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'train/'))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'val/'))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'test/'))
    if val_celeba_dl is not None:
        os.system('mkdir -p {}'.format(out_syn_images_dir + 'celeba_val/'))

    # ---- Optimizer and losses ----
    model_parameters = sfs_net_model.parameters()
    optimizer = torch.optim.Adam(model_parameters, lr=lr, weight_decay=wt_decay)

    normal_loss = nn.L1Loss()
    albedo_loss = nn.L1Loss()
    sh_loss     = nn.MSELoss()
    recon_loss  = nn.L1Loss()

    # NOTE(review): the original code moved the loss modules to GPU in two
    # identical blocks; the redundant second block was removed (no behavior change).
    if use_cuda:
        normal_loss = normal_loss.cuda()
        albedo_loss = albedo_loss.cuda()
        sh_loss     = sh_loss.cuda()
        recon_loss  = recon_loss.cuda()

    # Loss weights.
    lamda_recon  = 0.5
    lamda_albedo = 0.5
    lamda_normal = 0.5
    lamda_sh     = 0.1

    syn_train_len = len(syn_train_dl)
    for epoch in range(1, num_epochs+1):
        tloss = 0   # Total loss
        nloss = 0   # Normal loss
        aloss = 0   # Albedo loss
        shloss = 0  # SH loss
        rloss = 0   # Reconstruction loss

        for bix, data in enumerate(syn_train_dl):
            albedo, normal, mask, sh, face = data
            if use_cuda:
                albedo = albedo.cuda()
                normal = normal.cuda()
                mask   = mask.cuda()
                sh     = sh.cuda()
                face   = face.cuda()

            # Apply Mask on input image
            # face = applyMask(face, mask)
            predicted_normal, predicted_albedo, predicted_sh, out_shading, out_recon = sfs_net_model(face)

            # Loss computation
            current_normal_loss = normal_loss(predicted_normal, normal)
            current_albedo_loss = albedo_loss(predicted_albedo, albedo)
            current_sh_loss     = sh_loss(predicted_sh, sh)
            current_recon_loss  = recon_loss(out_recon, face)

            total_loss = lamda_normal * current_normal_loss \
                            + lamda_albedo * current_albedo_loss + lamda_sh * current_sh_loss + lamda_recon * current_recon_loss

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            # Logging for display and debugging purposes
            tloss  += total_loss.item()
            nloss  += current_normal_loss.item()
            aloss  += current_albedo_loss.item()
            shloss += current_sh_loss.item()
            rloss  += current_recon_loss.item()

        print('Epoch: {} - Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}'.format(epoch, tloss, \
                   nloss, aloss, shloss, rloss))

        log_prefix = 'Syn Data'
        if celeba_data is not None:
            log_prefix = 'Mix Data '

        if epoch % 1 == 0:
            print('Training set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}'.format(tloss / syn_train_len, \
                   nloss / syn_train_len, aloss / syn_train_len, shloss / syn_train_len, rloss / syn_train_len))

            # Log training info
            wandb.log({log_prefix + 'Train Total loss': tloss/syn_train_len, log_prefix + 'Train Albedo loss': aloss/syn_train_len, log_prefix + 'Train Normal loss': nloss/syn_train_len, \
                        log_prefix + 'Train SH loss': shloss/syn_train_len, log_prefix + 'Train Recon loss': rloss/syn_train_len})

            # Log images in wandb (uses the last batch of the epoch).
            file_name = out_syn_images_dir + 'train/' + 'train_' + str(epoch)
            save_p_normal = get_normal_in_range(predicted_normal)
            save_gt_normal = get_normal_in_range(normal)
            wandb_log_images(wandb, save_p_normal, mask, 'Train Predicted Normal', epoch, 'Train Predicted Normal', path=file_name + '_predicted_normal.png')
            wandb_log_images(wandb, predicted_albedo, mask, 'Train Predicted Albedo', epoch, 'Train Predicted Albedo', path=file_name + '_predicted_albedo.png')
            wandb_log_images(wandb, out_shading, mask, 'Train Predicted Shading', epoch, 'Train Predicted Shading', path=file_name + '_predicted_shading.png', denormalize=False)
            wandb_log_images(wandb, out_recon, mask, 'Train Recon', epoch, 'Train Recon', path=file_name + '_predicted_face.png')
            wandb_log_images(wandb, face, mask, 'Train Ground Truth', epoch, 'Train Ground Truth', path=file_name + '_gt_face.png')
            wandb_log_images(wandb, save_gt_normal, mask, 'Train Ground Truth Normal', epoch, 'Train Ground Truth Normal', path=file_name + '_gt_normal.png')
            wandb_log_images(wandb, albedo, mask, 'Train Ground Truth Albedo', epoch, 'Train Ground Truth Albedo', path=file_name + '_gt_albedo.png')

            # Get face with real_sh, predicted normal and albedo for debugging
            real_sh_face = sfs_net_model.get_face(sh, predicted_normal, predicted_albedo)
            syn_face = sfs_net_model.get_face(sh, normal, albedo)
            wandb_log_images(wandb, real_sh_face, mask, 'Train Real SH Predicted Face', epoch, 'Train Real SH Predicted Face', path=file_name + '_real_sh_face.png')
            wandb_log_images(wandb, syn_face, mask, 'Train Real SH GT Face', epoch, 'Train Real SH GT Face', path=file_name + '_syn_gt_face.png')

            # ---- Validation ----
            v_total, v_normal, v_albedo, v_sh, v_recon = predict_sfsnet(sfs_net_model, syn_val_dl, train_epoch_num=epoch, use_cuda=use_cuda,
                                                                         out_folder=out_syn_images_dir+'/val/', wandb=wandb)
            wandb.log({log_prefix + 'Val Total loss': v_total, log_prefix + 'Val Albedo loss': v_albedo, log_prefix + 'Val Normal loss': v_normal, \
                        log_prefix + 'Val SH loss': v_sh, log_prefix + 'Val Recon loss': v_recon})

            print('Val set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}'.format(v_total,
                   v_normal, v_albedo, v_sh, v_recon))

            if val_celeba_dl is not None:
                predict_celeba(sfs_net_model, val_celeba_dl, train_epoch_num = 0,
                               use_cuda = use_cuda, out_folder = out_syn_images_dir + 'celeba_val/', wandb = wandb, dump_all_images = True)

            # Model saving (overwritten every logging epoch).
            torch.save(sfs_net_model.state_dict(), model_checkpoint_dir + 'sfs_net_model.pkl')

        if epoch % 5 == 0:
            t_total, t_normal, t_albedo, t_sh, t_recon = predict_sfsnet(sfs_net_model, syn_test_dl, train_epoch_num=epoch, use_cuda=use_cuda,
                                                                         out_folder=out_syn_images_dir + '/test/', wandb=wandb, suffix='Test')
            wandb.log({log_prefix+'Test Total loss': t_total, log_prefix+'Test Albedo loss': t_albedo, log_prefix+'Test Normal loss': t_normal, \
                        log_prefix+ 'Test SH loss': t_sh, log_prefix+'Test Recon loss': t_recon})

            print('Test-set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}\n'.format(t_total,
                   t_normal, t_albedo, t_sh, t_recon))
def train_syn_celeba_both(sfs_net_model, syn_data, celeba_data,
          batch_size = 10, num_epochs = 10, log_path = './results/metadata/', use_cuda=False, wandb=None,
          lr = 0.01, wt_decay=0.005):
    """Jointly train SfSNet on synthetic (fully supervised) and CelebA
    (reconstruction-only) batches, alternating one batch from each per step.

    Synthetic batches are optimized with weighted L1/MSE losses on normal,
    albedo, SH and reconstruction; CelebA batches (no labels) are optimized
    with a reconstruction loss alone. The loop runs until both iterators are
    exhausted, so the longer dataset keeps training alone at the tail.

    Args:
        sfs_net_model: model exposing forward(face) ->
            (normal, albedo, sh, shading, recon).
        syn_data: synthetic-dataset root (train.csv/test.csv, train/ test/ dirs).
        celeba_data: CelebA root (train.csv/test.csv).
        batch_size, num_epochs, lr, wt_decay: training hyper-parameters.
        log_path: root directory for checkpoints and dumped images.
        use_cuda: move tensors and loss modules to GPU when True.
        wandb: wandb module/run used for metric and image logging (required).
    """
    # ---- Data ----
    syn_train_csv = syn_data + '/train.csv'
    syn_test_csv  = syn_data + '/test.csv'

    celeba_train_csv = celeba_data + '/train.csv'
    celeba_test_csv  = celeba_data + '/test.csv'

    # Load Synthetic dataset.
    # NOTE(review): this function passes `dir=` while the other trainers pass
    # `syn_dir=` to get_sfsnet_dataset — kept as-is; verify against the helper's
    # signature if this code path is revived.
    train_dataset, val_dataset = get_sfsnet_dataset(dir=syn_data+'train/', read_from_csv=syn_train_csv, validation_split=10)
    test_dataset, _ = get_sfsnet_dataset(dir=syn_data+'test/', read_from_csv=syn_test_csv, validation_split=0)

    syn_train_dl = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    syn_val_dl   = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
    syn_test_dl  = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    # Load CelebA dataset
    train_dataset, val_dataset = get_celeba_dataset(read_from_csv=celeba_train_csv, validation_split=10)
    test_dataset, _ = get_celeba_dataset(read_from_csv=celeba_test_csv, validation_split=0)

    celeba_train_dl = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    celeba_val_dl   = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
    celeba_test_dl  = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

    print('Synthetic dataset: Train data: ', len(syn_train_dl), ' Val data: ', len(syn_val_dl), ' Test data: ', len(syn_test_dl))
    print('CelebA dataset: Train data: ', len(celeba_train_dl), ' Val data: ', len(celeba_val_dl), ' Test data: ', len(celeba_test_dl))

    # ---- Output directories ----
    model_checkpoint_dir = log_path + 'checkpoints/'
    out_images_dir = log_path + 'out_images/'
    out_syn_images_dir = out_images_dir + 'syn/'
    out_celeba_images_dir = out_images_dir + 'celeba/'

    os.system('mkdir -p {}'.format(model_checkpoint_dir))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'train/'))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'val/'))
    os.system('mkdir -p {}'.format(out_syn_images_dir + 'test/'))
    os.system('mkdir -p {}'.format(out_celeba_images_dir + 'train/'))
    os.system('mkdir -p {}'.format(out_celeba_images_dir + 'val/'))
    os.system('mkdir -p {}'.format(out_celeba_images_dir + 'test/'))

    # ---- Optimizer and losses ----
    model_parameters = sfs_net_model.parameters()
    optimizer = torch.optim.Adam(model_parameters, lr=lr, weight_decay=wt_decay)

    normal_loss  = nn.L1Loss()
    albedo_loss  = nn.L1Loss()
    sh_loss      = nn.MSELoss()
    recon_loss   = nn.L1Loss()
    c_recon_loss = nn.L1Loss()  # separate module for the CelebA recon term

    # NOTE(review): the original code moved the loss modules to GPU in two
    # blocks (the second even omitted c_recon_loss); the redundant second block
    # was removed (no behavior change).
    if use_cuda:
        normal_loss  = normal_loss.cuda()
        albedo_loss  = albedo_loss.cuda()
        sh_loss      = sh_loss.cuda()
        recon_loss   = recon_loss.cuda()
        c_recon_loss = c_recon_loss.cuda()

    # Loss weights for the synthetic branch.
    lamda_recon  = 0.5
    lamda_albedo = 0.5
    lamda_normal = 0.5
    lamda_sh     = 0.1

    syn_train_len = len(syn_train_dl)
    celeba_train_len = len(celeba_train_dl)

    for epoch in range(1, num_epochs+1):
        tloss = 0         # Total loss
        nloss = 0         # Normal loss
        aloss = 0         # Albedo loss
        shloss = 0        # SH loss
        rloss = 0         # Reconstruction loss
        celeba_tloss = 0  # Celeba Total loss

        # Initiate iterators so synthetic and CelebA batches interleave 1:1.
        syn_train_iter = iter(syn_train_dl)
        celeba_train_iter = iter(celeba_train_dl)

        # Until we process both Synthetic and CelebA data
        while True:
            # Get and train on Synthetic dataset
            data = next(syn_train_iter, None)
            if data is not None:
                albedo, normal, mask, sh, face = data
                if use_cuda:
                    albedo = albedo.cuda()
                    normal = normal.cuda()
                    mask   = mask.cuda()
                    sh     = sh.cuda()
                    face   = face.cuda()

                # Apply Mask on input image
                face = applyMask(face, mask)
                predicted_normal, predicted_albedo, predicted_sh, out_shading, out_recon = sfs_net_model(face)

                # Loss computation
                current_normal_loss = normal_loss(predicted_normal, normal)
                current_albedo_loss = albedo_loss(predicted_albedo, albedo)
                current_sh_loss     = sh_loss(predicted_sh, sh)
                # Shading generation requires denormalized inputs, hence the
                # face is denormalized for the reconstruction target.
                current_recon_loss  = recon_loss(out_recon, denorm(face))

                total_loss = lamda_recon * current_recon_loss + lamda_normal * current_normal_loss \
                                + lamda_albedo * current_albedo_loss + lamda_sh * current_sh_loss

                optimizer.zero_grad()
                total_loss.backward(retain_graph=True)
                optimizer.step()

                # Logging for display and debugging purposes
                tloss  += total_loss.item()
                nloss  += current_normal_loss.item()
                aloss  += current_albedo_loss.item()
                shloss += current_sh_loss.item()
                rloss  += current_recon_loss.item()

            # Get and train on CelebA data (reconstruction supervision only)
            c_data = next(celeba_train_iter, None)
            if c_data is not None:
                # Get Mask as well if available
                c_mask = None
                if use_cuda:
                    c_data = c_data.cuda()
                c_face = c_data

                # Apply Mask on input image
                # face = applyMask(face, mask)
                c_predicted_normal, c_predicted_albedo, c_predicted_sh, c_out_shading, c_out_recon = sfs_net_model(c_face)

                # Reconstruction loss against the denormalized input face.
                crecon_loss = c_recon_loss(c_out_recon, denorm(c_face))
                optimizer.zero_grad()
                crecon_loss.backward()
                optimizer.step()

                celeba_tloss += crecon_loss.item()

            if data is None and c_data is None:
                break

        # BUGFIX(review): the original format string ended with a bare
        # 'CelebA loss' (no placeholder), so celeba_tloss was never printed.
        print('Epoch: {} - Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}, CelebA loss: {}'.format(epoch, tloss, \
                   nloss, aloss, shloss, rloss, celeba_tloss))

        if epoch % 1 == 0:
            print('Training set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}, CelebA Loss: {}'.format(tloss / syn_train_len, \
                   nloss / syn_train_len, aloss / syn_train_len, shloss / syn_train_len, rloss / syn_train_len, celeba_tloss / celeba_train_len))

            # Log training info
            wandb.log({'Train Total loss': tloss/syn_train_len, 'Train Albedo loss': aloss/syn_train_len, 'Train Normal loss': nloss/syn_train_len, \
                        'Train SH loss': shloss/syn_train_len, 'Train Recon loss': rloss/syn_train_len, 'Train CelebA loss:': celeba_tloss/celeba_train_len}, step=epoch)

            # Log images in wandb (uses the last batch of the epoch).
            file_name = out_syn_images_dir + 'train/' + 'train_' + str(epoch)
            wandb_log_images(wandb, predicted_normal, mask, 'Train Predicted Normal', epoch, 'Train Predicted Normal', path=file_name + '_predicted_normal.png')
            wandb_log_images(wandb, predicted_albedo, mask, 'Train Predicted Albedo', epoch, 'Train Predicted Albedo', path=file_name + '_predicted_albedo.png')
            wandb_log_images(wandb, out_shading, mask, 'Train Predicted Shading', epoch, 'Train Predicted Shading', path=file_name + '_predicted_shading.png', denormalize=False)
            wandb_log_images(wandb, out_recon, mask, 'Train Recon', epoch, 'Train Recon', path=file_name + '_predicted_face.png', denormalize=False)
            wandb_log_images(wandb, face, mask, 'Train Ground Truth', epoch, 'Train Ground Truth', path=file_name + '_gt_face.png')
            wandb_log_images(wandb, normal, mask, 'Train Ground Truth Normal', epoch, 'Train Ground Truth Normal', path=file_name + '_gt_normal.png')
            wandb_log_images(wandb, albedo, mask, 'Train Ground Truth Albedo', epoch, 'Train Ground Truth Albedo', path=file_name + '_gt_albedo.png')

            # Log CelebA image
            file_name = out_celeba_images_dir + 'train/' + 'train_' + str(epoch)
            wandb_log_images(wandb, c_predicted_normal, c_mask, 'Train CelebA Predicted Normal', epoch, 'Train CelebA Predicted Normal', path=file_name + '_c_predicted_normal.png')
            wandb_log_images(wandb, c_predicted_albedo, c_mask, 'Train CelebA Predicted Albedo', epoch, 'Train CelebA Predicted Albedo', path=file_name + '_c_predicted_albedo.png')
            wandb_log_images(wandb, c_out_shading, c_mask, 'Train CelebA Predicted Shading', epoch, 'Train CelebA Predicted Shading', path=file_name + '_c_predicted_shading.png', denormalize=False)
            wandb_log_images(wandb, c_out_recon, c_mask, 'Train CelebA Recon', epoch, 'Train CelebA Recon', path=file_name + '_c_predicted_face.png', denormalize=False)
            wandb_log_images(wandb, c_face, c_mask, 'Train CelebA Ground Truth', epoch, 'Train CelebA Ground Truth', path=file_name + '_c_gt_face.png')

            # ---- Validation on both datasets ----
            v_total, v_normal, v_albedo, v_sh, v_recon = predict_sfsnet(sfs_net_model, syn_val_dl, train_epoch_num=epoch, use_cuda=use_cuda,
                                                                         out_folder=out_syn_images_dir+'/val/', wandb=wandb)

            print('Synthetic Val set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}'.format(v_total,
                   v_normal, v_albedo, v_sh, v_recon))

            v_total = predict_celeba(sfs_net_model, celeba_val_dl, train_epoch_num=epoch, use_cuda=use_cuda,
                                     out_folder=out_celeba_images_dir+'/val/', wandb=wandb)
            print('CelebA Val set results: Total Loss: {}'.format(v_total))

            # Model saving (overwritten every logging epoch).
            torch.save(sfs_net_model.state_dict(), model_checkpoint_dir + 'sfs_net_model.pkl')

        if epoch % 5 == 0:
            t_total, t_normal, t_albedo, t_sh, t_recon = predict_sfsnet(sfs_net_model, syn_test_dl, train_epoch_num=epoch, use_cuda=use_cuda,
                                                                         out_folder=out_syn_images_dir + '/test/', wandb=wandb)
            print('Test-set results: Total Loss: {}, Normal Loss: {}, Albedo Loss: {}, SH Loss: {}, Recon Loss: {}\n'.format(t_total,
                   t_normal, t_albedo, t_sh, t_recon))
| 37,617 | 52.663338 | 213 | py |
SfSNet-PyTorch | SfSNet-PyTorch-master/interpolate.py | from models import *
from utils import save_image
from torch.utils.data import Dataset, DataLoader, random_split
import torchvision
from torchvision import transforms
import numpy as np
import os
import argparse
# Target size handed to transforms.Resize before images are fed to the network.
IMAGE_SIZE = 128
def interpolate(model_dir, input_path, output_path):
    """Run a trained SfSNet over a folder of images and dump its decompositions.

    For each image (batch size 1) the face, predicted normal, albedo, shading
    and reconstruction are written as '<idx>_*.png' under `output_path`, and
    the predicted SH lighting vector as a tab-separated '<idx>_light.txt'.

    Args:
        model_dir: directory containing 'sfs_net_model.pkl' (a state_dict).
        input_path: root folder for torchvision.datasets.ImageFolder (images
            must live inside at least one sub-directory).
        output_path: output directory; created via `mkdir -p` if missing.
    """
    use_cuda = torch.cuda.is_available()
    os.system('mkdir -p {}'.format(output_path))

    # Load images
    transform = transforms.Compose([
                    transforms.Resize(IMAGE_SIZE),
                    transforms.ToTensor()
                ])
    img_dataset = torchvision.datasets.ImageFolder(input_path, transform=transform)
    dl = DataLoader(img_dataset, batch_size=1)
    print('Data size:', len(dl))

    # Load model
    sfs_net_model = SfsNetPipeline()
    if use_cuda:
        sfs_net_model = sfs_net_model.cuda()
    # BUGFIX(review): without map_location, a checkpoint saved on GPU cannot be
    # deserialized on a CPU-only machine; remap to CPU when CUDA is unavailable.
    sfs_net_model.load_state_dict(torch.load(model_dir + 'sfs_net_model.pkl',
                                             map_location=None if use_cuda else 'cpu'))
    # NOTE(review): the model is used as loaded (no explicit .eval()); if the
    # pipeline contains dropout/batch-norm, confirm whether eval mode is wanted.

    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for bix, (data, _) in enumerate(dl):
            if use_cuda:
                data = data.cuda()
            normal, albedo, sh, shading, recon = sfs_net_model(data)
            output_dir = output_path + str(bix)
            # normal = normal * 128 + 128
            # normal = normal.clamp(0, 255) / 255
            save_image(data, path=output_dir+'_face.png')
            save_image(normal, path=output_dir+'_normal.png')
            save_image(albedo, path=output_dir+'_albedo.png')
            save_image(shading, path=output_dir+'_shading.png')
            save_image(recon, path=output_dir+'_recon.png')
            sh = sh.cpu().detach().numpy()
            np.savetxt(output_dir+'_light.txt', sh, delimiter='\t')
def main():
    """CLI entry point: parse arguments and run interpolation on the
    mixed-data (synthetic + CelebA) trained checkpoint."""
    arg_parser = argparse.ArgumentParser(description='SfSNet - Interpolation')
    arg_parser.add_argument('--data', type=str, default='../data/interpolation-input/faces/',
                            help='interpolation input')
    arg_parser.add_argument('--load_model', type=str, default=None,
                            help='load model from')
    arg_parser.add_argument('--output_dir', type=str, default=None,
                            help='Interpolation output path')
    cli_args = arg_parser.parse_args()

    # The synthetic-only checkpoint path is intentionally disabled; only the
    # mixed-training model is interpolated here.
    mix_model_path = cli_args.load_model + 'Mix_Training/checkpoints/'
    mix_output_dir = cli_args.output_dir + '/Mix_Train_Interpolation/'
    interpolate(mix_model_path, cli_args.data, mix_output_dir)
# Run the CLI only when this file is executed directly (not on import).
if __name__ == '__main__':
    main()
| 2,582 | 30.120482 | 89 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.