repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
pybo
|
pybo-master/pybo/demos/animated2.py
|
"""
Animated demo showing progress of Bayesian optimization on a simple
two-dimensional function.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from reggie import make_gp, MCMC
from ezplot import figure, show
from pybo import inits
from pybo import policies
from pybo import solvers
from pybo import recommenders
__all__ = []
def f(x):
"""
Test function we'll optimize. This is a 1d, sinusoidal function used by
Gramacy and Lee in "Cases for the nugget in modeling computer experiments".
"""
x = np.array(x, ndmin=2)
y = (x[:, 1]-(5.1/(4*np.pi**2))*x[:, 0]**2+5*x[:, 0]/np.pi-6)**2
y += 10*(1-1/(8*np.pi))*np.cos(x[:, 0])+10
# NOTE: this rescales branin by 10 to make it more manageable.
y /= 10.
return -np.squeeze(y)
def main():
"""Run the demo."""
# define the bounds over which we'll optimize, the optimal x for comparison,
# and a sequence of test points
bounds = np.array([[-5, 10.], [0, 15]])
xopt = np.array([np.pi, 2.275])
x1, x2 = np.meshgrid(np.linspace(*bounds[0], num=100),
np.linspace(*bounds[1], num=100))
xx = np.c_[x1.flatten(), x2.flatten()]
# get initial data and some test points.
X = list(inits.init_latin(bounds, 6))
Y = [f(x_) for x_ in X]
F = list()
# initialize the model
model = make_gp(0.01, 10, [1., 1.], 0)
model.add_data(X, Y)
# set a prior on the parameters
model.params['like.sn2'].set_prior('uniform', 0.005, 0.015)
model.params['kern.rho'].set_prior('lognormal', 0, 3)
model.params['kern.ell'].set_prior('lognormal', 0, 3)
model.params['mean.bias'].set_prior('normal', 0, 20)
# make a model which samples parameters
model = MCMC(model, n=10, rng=None)
# create a new figure
fig = figure(figsize=(10, 6))
while True:
# get index to solve it and plot it
index = policies.EI(model, bounds, X, xi=0.1)
# get the recommendation and the next query
xbest = recommenders.best_incumbent(model, bounds, X)
xnext, _ = solvers.solve_lbfgs(index, bounds)
# observe and update model
ynext = f(xnext)
model.add_data(xnext, ynext)
# evaluate the posterior and the acquisition function
mu, s2 = model.predict(xx)
# record our data and update the model
X.append(xnext)
Y.append(ynext)
F.append(f(xbest))
fig.clear()
ax1 = fig.add_subplotspec((2, 2), (0, 0), hidex=True)
ax2 = fig.add_subplotspec((2, 2), (1, 0), hidey=True, sharex=ax1)
ax3 = fig.add_subplotspec((2, 2), (0, 1), rowspan=2)
# plot the posterior and data
ax1.contourf(x1, x2, mu.reshape(x1.shape), alpha=0.4)
X_ = np.array(X)
ax1.scatter(X_[:-1, 0], X_[:-1, 1], marker='.')
ax1.scatter(xbest[0], xbest[1], linewidths=3, marker='o', color='r')
ax1.scatter(xnext[0], xnext[1], linewidths=3, marker='o', color='g')
ax1.set_xlim(*bounds[0])
ax1.set_ylim(*bounds[1])
ax1.set_title('current model (xbest and xnext)')
# plot the acquisition function
ax2.contourf(x1, x2, index(xx).reshape(x1.shape), alpha=0.5)
ax2.scatter(xbest[0], xbest[1], linewidths=3, marker='o', color='r')
ax2.scatter(xnext[0], xnext[1], linewidths=3, marker='o', color='g')
ax2.set_xlim(*bounds[0])
ax2.set_ylim(*bounds[1])
ax2.set_title('current policy (xnext)')
# plot the latent function at recomended points
ax3.axhline(f(xopt))
ax3.plot(F)
ax3.set_ylim(-1., 0.)
ax3.set_title('value of recommendation')
# draw
fig.canvas.draw()
show(block=False)
if __name__ == '__main__':
main()
| 3,847
| 30.032258
| 80
|
py
|
pybo
|
pybo-master/pybo/demos/bandit.py
|
"""
Animated demo showing optimization of a bandit with independent arms.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from reggie import BetaBernoulli
from ezplot import figure, show
def main():
"""Run the demo."""
# define the model and sample an instance from it
n = 10
rng = np.random.RandomState(3)
model = BetaBernoulli(np.ones(n))
f = model.sample(rng=rng)
# grab the optimal latent value and make lists of observations
xopt = f.argmax()
fopt = f.max()
# create a new figure
fig = figure(figsize=(10, 6))
while True:
# evaluate the posterior before updating the model for plotting
mu = model.get_quantile(0.5)
lo = model.get_quantile(0.05)
hi = model.get_quantile(0.95)
# get our index
target = mu.max()
index = model.get_improvement(target)
# query
x = index.argmax()
y = int(rng.uniform() < f[x])
# add the data
model.add_data(x, y)
# PLOT EVERYTHING
fig.clear()
ax1 = fig.add_subplotspec((2, 1), (0, 0), hidex=True)
ax2 = fig.add_subplotspec((2, 1), (1, 0), hidey=True, sharex=ax1)
ax1.errorbar(np.arange(n), mu, (mu-lo, hi-mu), ls='', marker='s',
markersize=20, capsize=30, capthick=2)
ax1.axvline(xopt, zorder=-1)
ax1.axhline(fopt, zorder=-1)
ax1.set_ylim(0, 1)
ax1.set_xlim(-0.3, n-1+0.3)
ax2.bar(np.arange(n)-0.25, index, 0.5)
# draw
fig.canvas.draw()
show()
if __name__ == '__main__':
main()
| 1,684
| 23.779412
| 73
|
py
|
pybo
|
pybo-master/pybo/demos/subprocess.py
|
"""
This demo illustrates how to use pybo to optimize a black-box function that
calls an external process. In particular this calls the command line calculator
`bc` to optimize a simple quadratic.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from ezplot import figure, show
from pybo import solve_bayesopt
from pybo.utils import SubprocessQuery
__all__ = []
def main():
"""Run the demo."""
# grab a test function
f = SubprocessQuery("bc <<< 'scale=8; x={}; -((x-3)^2)'")
bounds = [0, 8]
x = np.linspace(bounds[0], bounds[1], 500)
# solve the model
xbest, model, info = solve_bayesopt(f, bounds, niter=30, verbose=True)
mu, s2 = model.predict(x[:, None])
# plot the final model
ax = figure().gca()
ax.plot_banded(x, mu, 2*np.sqrt(s2))
ax.axvline(xbest)
ax.scatter(info.x.ravel(), info.y)
ax.figure.canvas.draw()
show()
if __name__ == '__main__':
main()
| 1,013
| 23.142857
| 79
|
py
|
pybo
|
pybo-master/pybo/demos/solve.py
|
"""
Demo which illustrates how to use solve_bayesopt as a simple method for global
optimization. The return values are the sequence of recommendations made by the
algorithm as well as the final model. The point `xbest[-1]` is the final
recommendation, i.e. the expected maximizer.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from ezplot import figure, show
from pybo import solve_bayesopt
__all__ = []
def f(x):
"""
Test function that we will optimize. This is a simple sinusoidal function
whose maximum should be found very quickly.
"""
x = float(x)
return -np.cos(x) - np.sin(3*x)
def main():
"""Run the demo."""
# grab a test function
bounds = [0, 2*np.pi]
x = np.linspace(bounds[0], bounds[1], 500)
# solve the model
xbest, model, info = solve_bayesopt(f, bounds, niter=30, verbose=True)
# make some predictions
mu, s2 = model.predict(x[:, None])
# plot the final model
ax = figure().gca()
ax.plot_banded(x, mu, 2*np.sqrt(s2))
ax.axvline(xbest)
ax.scatter(info.x.ravel(), info.y)
ax.figure.canvas.draw()
show()
if __name__ == '__main__':
main()
| 1,238
| 22.826923
| 79
|
py
|
pybo
|
pybo-master/pybo/demos/interactive.py
|
"""
This demo illustrates how to use pybo to optimize a black-box function that
requires a human in the loop. This script will prompt the user for a numerical
value at a particular design point every time it needs a new observation.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from ezplot import figure, show
from pybo import solve_bayesopt
from pybo.utils import InteractiveQuery
__all__ = []
def main():
"""Run the demo."""
# initialize interactive function and 1d bounds
f = InteractiveQuery()
bounds = [0, 1]
x = np.linspace(bounds[0], bounds[1], 100)
# optimize the model and get final predictions
xbest, model, info = solve_bayesopt(f, bounds, niter=10)
mu, s2 = model.predict(x[:, None])
# plot the final model
fig = figure()
axs = fig.gca()
axs.plot_banded(x, mu, 2*np.sqrt(s2))
axs.axvline(xbest)
axs.scatter(info.x.ravel(), info.y)
fig.canvas.draw()
show()
if __name__ == '__main__':
main()
| 1,067
| 23.837209
| 78
|
py
|
pybo
|
pybo-master/pybo/demos/animated.py
|
"""
Animated demo showing progress of Bayesian optimization on a simple
(but highly multimodal) one-dimensional function.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from reggie import make_gp, MCMC
from ezplot import figure, show
from pybo import inits
from pybo import policies
from pybo import solvers
from pybo import recommenders
__all__ = []
def f(x):
"""
Test function we'll optimize. This is a 1d, sinusoidal function used by
Gramacy and Lee in "Cases for the nugget in modeling computer experiments".
"""
x = float(x)
return -np.sin(10*np.pi*x) / (2*x) - (x-1)**4
def main():
"""Run the demo."""
# define the bounds over which we'll optimize, the optimal x for
# comparison, and a sequence of test points
bounds = np.array([[0.5, 2.5]])
xopt = 0.54856343
fopt = f(xopt)
x = np.linspace(bounds[0][0], bounds[0][1], 500)
# get initial data and some test points.
X = list(inits.init_latin(bounds, 3))
Y = [f(x_) for x_ in X]
F = []
# initialize the model
model = make_gp(0.01, 1.9, 0.1, 0)
model.add_data(X, Y)
# set a prior on the parameters
model.params['like.sn2'].set_prior('uniform', 0.005, 0.015)
model.params['kern.rho'].set_prior('lognormal', 0, 100)
model.params['kern.ell'].set_prior('lognormal', 0, 10)
model.params['mean.bias'].set_prior('normal', 0, 20)
# make a model which samples parameters
model = MCMC(model, n=20, rng=None)
# create a new figure
fig = figure(figsize=(10, 6))
while True:
# get acquisition function (or index)
index = policies.EI(model, bounds, X, xi=0.1)
# get the recommendation and the next query
xbest = recommenders.best_incumbent(model, bounds, X)
xnext, _ = solvers.solve_lbfgs(index, bounds)
ynext = f(xnext)
# evaluate the posterior before updating the model for plotting
mu, s2 = model.predict(x[:, None])
# record our data and update the model
X.append(xnext)
Y.append(ynext)
F.append(f(xbest))
model.add_data(xnext, ynext)
# PLOT EVERYTHING
fig.clear()
ax1 = fig.add_subplotspec((2, 2), (0, 0), hidex=True)
ax2 = fig.add_subplotspec((2, 2), (1, 0), hidey=True, sharex=ax1)
ax3 = fig.add_subplotspec((2, 2), (0, 1), rowspan=2)
# plot the posterior and data
ax1.plot_banded(x, mu, 2*np.sqrt(s2))
ax1.scatter(np.ravel(X), Y)
ax1.axvline(xbest)
ax1.axvline(xnext, color='g')
ax1.set_ylim(-6, 3)
ax1.set_title('current model (xbest and xnext)')
# plot the acquisition function
ax2.plot_banded(x, index(x[:, None]))
ax2.axvline(xnext, color='g')
ax2.set_xlim(*bounds)
ax2.set_title('current policy (xnext)')
# plot the latent function at recomended points
ax3.plot(F)
ax3.axhline(fopt)
ax3.set_ylim(0.4, 0.9)
ax3.set_title('value of recommendation')
# draw
fig.canvas.draw()
show(block=False)
if __name__ == '__main__':
main()
| 3,215
| 27.460177
| 79
|
py
|
pybo
|
pybo-master/pybo/demos/__init__.py
| 0
| 0
| 0
|
py
|
|
RLNLocalization
|
RLNLocalization-main/main.py
|
# =====================================================
# Train the segmentation Network for the three tissues
# =====================================================
import time
from op.run_op import Trainer
from op.args_op import TrainParsers
if __name__ == '__main__':
    # Train the segmentation network and keep the best-validation checkpoint.
    exp_name = "UNetSeg"
    args = TrainParsers(exp_name)
    trainer = Trainer(args)

    # baseline validation metric before any training
    init_metric = trainer.val()
    best_metric = init_metric
    init_info = "Init metric: {:.5f}".format(init_metric)
    # FIX: use a context manager so the log file handle is closed promptly;
    # the original `open(...).write(...)` leaked the handle each time.
    with open(trainer.log, "a+") as log_file:
        log_file.write(init_info+"\n")
    print(init_info)

    for epoch in range(args.start_epoch, args.num_epochs + args.start_epoch):
        print("Training epoch", epoch)
        start_time = time.time()
        loss = trainer.train()
        curr_metric = trainer.val()
        epoch_info = "Epoch [{}/{}] Loss: {:.5f} Metric: {:.5f} Time: {}min".format(
            epoch, args.num_epochs + args.start_epoch - 1, loss, curr_metric, (time.time() - start_time) // 60
        )
        with open(trainer.log, "a+") as log_file:
            log_file.write(epoch_info+"\n")
        print(epoch_info)
        # higher metric is better here (segmentation score); save best checkpoint
        if curr_metric > best_metric:
            best_metric = curr_metric
            trainer.save_weight("best")
        # per-epoch checkpoint and counter update
        trainer.save_weight(str(epoch))
        trainer.update_count()
| 1,250
| 30.275
| 110
|
py
|
RLNLocalization
|
RLNLocalization-main/statistic_test.py
|
# ========================================
# Perform alignment based on Prior Library
# ========================================
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
from scipy.ndimage import center_of_mass
from medpy.metric import dc
from torchvision import transforms as T
from op.data_op import load_list
from dipy.align import imaffine
from dipy.align import transforms
TEXT_PATH = "Text"
VIEW = ['left']
VISUAL = 'Visual'
PRIOR_DATA = 'PRIOR'
def double_align(tissues_mask, segmentations_mask, rln_and_tissues_mask):
identity = np.eye(3)
c_of_mass = imaffine.transform_centers_of_mass(segmentations_mask, identity, tissues_mask, identity)
n_bins = 32
sampling_prop = None
metric = imaffine.MutualInformationMetric(n_bins, sampling_prop)
level_iter = [10000, 1000, 100]
sigmas = [3.0, 1.0, 0.0]
factors = [4, 2, 1]
affine_reg = imaffine.AffineRegistration(metric=metric, level_iters=level_iter, sigmas=sigmas, factors=factors,
verbosity=0)
transform = transforms.TranslationTransform2D()
params0 = None
starting_affine = c_of_mass.affine
translation = affine_reg.optimize(segmentations_mask, tissues_mask, transform, params0, identity, identity,
starting_affine=starting_affine)
# transformed_img = translation.transform(img, interpolation='linear')
transformed_tissues_mask = translation.transform(tissues_mask, interpolation='nearest')
transformed_rln_and_tissues_mask = translation.transform(rln_and_tissues_mask, interpolation='nearest')
transformed_tissues_mask = transformed_tissues_mask / 50
transformed_tissues_mask = transformed_tissues_mask.astype(np.int32)
transformed_tissues_mask *= 50
transformed_rln_and_tissues_mask = transformed_rln_and_tissues_mask / 50
transformed_rln_and_tissues_mask = transformed_rln_and_tissues_mask.astype(np.int32)
transformed_rln_and_tissues_mask *= 50
return transformed_tissues_mask, transformed_rln_and_tissues_mask
if __name__ == '__main__':
    # Align every prior-library mask onto each test segmentation and record
    # the resulting RLN centroids and alignment quality (Dice).
    train_list, val_list, test_list = load_list(TEXT_PATH, VIEW)
    target_list = test_list
    source_list = val_list + train_list
    os.makedirs(VISUAL, exist_ok=True)
    os.makedirs(PRIOR_DATA, exist_ok=True)
    mask_transform = T.Compose([
        T.Resize((256, 256), Image.NEAREST),
    ])
    target_dice_list = []
    for target_path in target_list[:10]:
        patient_id, view_type, item_id = target_path.split('\\')[1:]
        segmentations_path = 'Results/{}-{}-{}.png'.format(patient_id, view_type, item_id)
        seg_msk = Image.open(segmentations_path).convert('L')
        seg_msk_arr = np.array(seg_msk, dtype=np.int32)
        target_item_dice_list = []
        prior_item_center_list = []
        for source_path in tqdm(source_list):
            # build the labelled prior masks: CCA=1, thyroid=2, trachea=3, RLN=4 (x50)
            temp_list = []
            for idx, mask_item in enumerate(["CCA", "thyroid", "trachea", "RLN"]):
                msk = Image.open(os.path.join(source_path, "MASK", "{}.jpg".format(mask_item)))
                msk = mask_transform(msk)
                msk = np.array(msk) / 255 * (idx + 1)
                temp_list.append(msk)
            tissues_msk_arr = np.stack(temp_list[:-1], axis=0)
            tissues_msk_arr = np.max(tissues_msk_arr, axis=0) * 50
            tissues_msk_arr = tissues_msk_arr.astype(np.int32)
            rln_msk_arr = np.stack(temp_list, axis=0)
            rln_msk_arr = np.max(rln_msk_arr, axis=0) * 50
            rln_msk_arr = rln_msk_arr.astype(np.int32)
            # align the prior onto the predicted segmentation and score the fit
            aligned_tissues_msk, aligned_rln_msk = double_align(tissues_msk_arr, seg_msk_arr, rln_msk_arr)
            dice_aligned_seg = dc(aligned_tissues_msk, seg_msk_arr)
            target_item_dice_list.append(dice_aligned_seg)
            # centroid of the aligned RLN label (value 200 == 4 * 50)
            prior_rln_msk = aligned_rln_msk == 200
            prior_rln_msk = prior_rln_msk.astype(np.int32)
            center_coord = center_of_mass(prior_rln_msk)
            prior_item_center_list.append([center_coord[0], center_coord[1], dice_aligned_seg])
        target_item_dice_list.sort(reverse=True)
        target_dice_list.append(target_item_dice_list)
        plt.figure(figsize=(10, 5))
        plt.subplot(1, 2, 1)
        plt.imshow(seg_msk_arr)
        for temp in prior_item_center_list:
            # marker size reflects alignment quality (Dice)
            if temp[2] > 0.85:
                s = 15
            elif temp[2] > 0.75:
                # FIX: the original condition `0.75 > temp[2] > 0.85` could
                # never be true, so mid-quality priors always got the smallest
                # marker; `> 0.75` (with the > 0.85 case already taken) is the
                # evident intent.
                s = 5
            else:
                s = 1
            plt.scatter(x=temp[1], y=temp[0], s=s, alpha=0.3, c="red")
        plt.subplot(1, 2, 2)
        plt.plot(target_item_dice_list)
        plt.title('Sorted Dice score of each aligned prior mask to the segmentation')
        plt.xlabel('Subj Id')
        plt.ylabel('Dice')
        # plt.show()
        plt.savefig(os.path.join(VISUAL, '{}-{}-{}.png'.format(patient_id, view_type, item_id)))
        plt.close()
        prior_item_center_arr = np.array(prior_item_center_list)
        np.save(os.path.join(PRIOR_DATA, '{}-{}-{}.npy'.format(patient_id, view_type, item_id)), prior_item_center_arr)
    # x = np.arange(1, len(target_item_dice_list) + 1)
    plt.figure()
    for temp in target_dice_list:
        plt.plot(temp)
    plt.show()
| 5,288
| 37.326087
| 120
|
py
|
RLNLocalization
|
RLNLocalization-main/utils.py
|
import os
import torch
import numpy as np
from matplotlib import pyplot as plt
from medpy.metric import dc
from dipy.align import imaffine
from dipy.align import transforms
def check_dir(path):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
def set_device(cuda):
"""
Set the torch gpu device
TODO: parallel setup is requested
----------------------------
Parameters:
cuda: [int] id of the used GPU, where -1 is "cpu"
Return:
torch device
----------------------------
"""
assert isinstance(cuda, int)
if cuda == -1 or not torch.cuda.is_available():
device = torch.device('cpu')
else:
device = torch.device('cuda:{}'.format(cuda))
return device
class Plotter(object):
"""
Plot the loss/metric curves
"""
def __init__(self, send_path):
"""
send_path: [string] path to save the figures
"""
self.send_path = send_path
self.buffer = dict()
def update(self, logs):
"""
logs: [dict] metric dict that to be plot
"""
for key in logs.keys():
if key not in self.buffer.keys():
self.buffer[key] = []
self.buffer[key].append(logs[key])
def send(self):
"""
function to plot the curve
"""
for key in self.buffer.keys():
plt.figure()
plt.plot(self.buffer[key])
plt.title(key)
plt.xlabel("epoch")
plt.savefig(os.path.join(self.send_path, key+".png"))
plt.close()
class Recorder(object):
"""
record the metric and return the statistic results
"""
def __init__(self, keys):
"""
keys: [list] variables' name to be saved
"""
self.data = dict()
self.keys = keys
for key in keys:
self.data[key] = []
def update(self, item):
"""
item: [dict] data dict to update the buffer, the keys should be consistent
"""
for key in item.keys():
self.data[key].append(item[key])
def reset(self, keys=None):
"""
keys: [list] variables to be cleared in the buffer
"""
if keys is None:
keys = self.data.keys()
for key in keys:
self.data[key] = []
def call(self, key, return_std=False):
"""
key: [string] variable to be calculated for the statistical results
return_std: [bool] option to return variance
"""
arr = np.array(self.data[key])
if return_std:
return np.mean(arr), np.std(arr)
else:
return np.mean(arr)
def array2tensor(array, dtype="float32"):
"""
transfer the numpy array to the torch tensor
TODO: more dtype is requested
----------------------------
Parameters:
array: [numpy.array] array to be transferred
dtype: [string] type of the tensor, current only support Float32 and Int64
Return:
torch tensor
----------------------------
"""
tensor = torch.from_numpy(array)
if dtype == "float32":
return tensor.float()
elif dtype == "int64":
return tensor.long()
else:
raise NameError("Currently only support Float32 and Int64")
def tensor2array(tensor, squeeze=False):
"""
transfer the torch tensor to the numpy array
----------------------------
Parameters:
tensor: [torch.Tensor] tensor to be transferred
squeeze: [bool] option for squeeze the tensor
Return:
numpy array
----------------------------
"""
if squeeze:
tensor = tensor.squeeze()
return tensor.cpu().detach().numpy()
def procrustes_analysis(reference_mask, mask):
identity = np.eye(3)
c_of_mass = imaffine.transform_centers_of_mass(reference_mask, identity, mask, identity)
n_bins = 32
sampling_prop = None
metric = imaffine.MutualInformationMetric(n_bins, sampling_prop)
level_iter = [10000, 1000, 100]
sigmas = [3.0, 1.0, 0.0]
factors = [4, 2, 1]
affine_reg = imaffine.AffineRegistration(metric=metric, level_iters=level_iter, sigmas=sigmas, factors=factors)
transform = transforms.TranslationTransform2D()
params0 = None
translation = affine_reg.optimize(reference_mask, mask, transform, params0, identity, identity,
starting_affine=c_of_mass.affine)
transform = transforms.RigidTransform2D()
rigid = affine_reg.optimize(reference_mask, mask, transform, params0, identity, identity, starting_affine=translation.affine)
# transformed_img = rigid.transform(img, interpolation='linear')
transformed_mask = rigid.transform(mask, interpolation='nearest')
transformed_mask = transformed_mask / 50
transformed_mask = transformed_mask.astype(np.int32)
transformed_mask *= 50
print(set(list(transformed_mask.reshape(-1))))
return
| 4,994
| 26.75
| 129
|
py
|
RLNLocalization
|
RLNLocalization-main/refine_infer.py
|
# =====================================================
# Test the segmentation Network for the three tissues
# =====================================================
from op import args_op as ini_op
from op.run_op import RefineTester
if __name__ == '__main__':
    # configure the refine-localization test run
    args = ini_op.RefineTestParsers("RefineLocate-v2")
    print("Start evaluation {}".format(args.exp_name))
    # run the full evaluation pass
    RefineTester(args).test()
| 451
| 24.111111
| 55
|
py
|
RLNLocalization
|
RLNLocalization-main/refine_train.py
|
# =====================================================
# Train the segmentation Network for the three tissues
# =====================================================
import time
from op.run_op import Refiner
from op.args_op import RefineParsers
if __name__ == '__main__':
    # Train the refine-localization network and keep the best checkpoint.
    exp_name = "RefineLocate-v2"
    args = RefineParsers(exp_name)
    trainer = Refiner(args)

    # baseline validation metric before any training
    init_metric = trainer.val()
    best_metric = init_metric
    init_info = "Init metric: {:.5f}".format(init_metric)
    # FIX: use a context manager so the log file handle is closed promptly;
    # the original `open(...).write(...)` leaked the handle each time.
    with open(trainer.log, "a+") as log_file:
        log_file.write(init_info+"\n")
    print(init_info)

    for epoch in range(args.start_epoch, args.num_epochs + args.start_epoch):
        print("Training epoch", epoch)
        start_time = time.time()
        loss = trainer.train()
        curr_metric = trainer.val()
        epoch_info = "Epoch [{}/{}] Loss: {:.5f} Metric: {:.5f} Time: {}min".format(
            epoch, args.num_epochs + args.start_epoch - 1, loss, curr_metric, (time.time() - start_time) // 60
        )
        with open(trainer.log, "a+") as log_file:
            log_file.write(epoch_info+"\n")
        print(epoch_info)
        # NOTE(review): lower metric is better here (presumably a distance
        # error, unlike the Dice-style metric in main.py) — confirm in Refiner.
        if curr_metric < best_metric:
            best_metric = curr_metric
            trainer.save_weight("best")
        # per-epoch checkpoint and counter update
        trainer.save_weight(str(epoch))
        trainer.update_count()
| 1,260
| 30.525
| 110
|
py
|
RLNLocalization
|
RLNLocalization-main/prior_localize.py
|
import os
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from torchvision import transforms as T
from scipy.ndimage import center_of_mass
PRIOR_PATH = 'PRIOR_right'
SAVE_PATH = 'Prior_Results_right'
GT_PATH = '../Dataset/Data'

# resize masks to the working resolution with nearest-neighbour interpolation
mask_transform = T.Compose([
    T.Resize((256, 256), Image.NEAREST),
])

if __name__ == '__main__':
    # Localize the RLN from the saved prior centroids and compare against the
    # ground-truth RLN centroid.
    os.makedirs(SAVE_PATH, exist_ok=True)
    prior_list = os.listdir(PRIOR_PATH)
    error_mean = 0.
    error_weighted_mean = 0.
    count = 0
    error_arr = []
    for prior_name in prior_list:
        prior_data = np.load(os.path.join(PRIOR_PATH, prior_name))
        # drop rows whose centroid is NaN (alignment produced an empty RLN mask)
        nan_check = ~np.isnan(prior_data).any(axis=1)
        prior_data = prior_data[nan_check, :]
        # keep only well-aligned priors, then the top-K by Dice
        dice_threshold = 0.8
        failed_check = prior_data[:, 2] > dice_threshold
        prior_data = prior_data[failed_check, :]
        K = 10
        top_orders = np.argsort(prior_data[:, 2])
        top_orders = np.flipud(top_orders)
        top_orders = top_orders[:K]
        prior_data = prior_data[top_orders, :]
        prior_name = prior_name.split('.npy')[0]
        patient_id, view_type_1, view_type_2, item_id = prior_name.split('-')
        view_type = view_type_1 + '-' + view_type_2
        # rebuild the labelled ground-truth mask (CCA=50, thyroid=100,
        # trachea=150, RLN=200)
        temp_list = []
        for idx, mask_item in enumerate(["CCA", "thyroid", "trachea", "RLN"]):
            msk = Image.open(os.path.join(GT_PATH, patient_id, view_type, item_id, "MASK", "{}.jpg".format(mask_item)))
            msk = mask_transform(msk)
            msk = np.array(msk) / 255 * (idx + 1)
            temp_list.append(msk)
        gt_msk_arr = np.stack(temp_list, axis=0)
        gt_msk_arr = np.max(gt_msk_arr, axis=0) * 50
        gt_msk_arr = gt_msk_arr.astype(np.int32)
        gt_rln_msk = gt_msk_arr == 200
        gt_rln_msk = gt_rln_msk.astype(np.int32)
        gt_rln_coord = list(center_of_mass(gt_rln_msk))
        # plain and Dice-weighted averages of the prior RLN centroids
        mean_rln_coord = np.mean(prior_data, axis=0)
        weight = np.exp(prior_data[:, 2])
        weight /= weight.sum()
        weighted_mean_rln_coord = np.sum(prior_data[:, :2] * weight[:, np.newaxis], axis=0)
        dist_mean_gt = np.sqrt(np.sum((mean_rln_coord[:2] - gt_rln_coord) ** 2))
        dist_weighted_mean_gt = np.sqrt(np.sum((weighted_mean_rln_coord - gt_rln_coord) ** 2))
        plt.figure()
        plt.imshow(gt_msk_arr)
        for temp in prior_data:
            plt.scatter(x=temp[1], y=temp[0], s=1, alpha=0.1, c="red")
        plt.scatter(x=gt_rln_coord[1], y=gt_rln_coord[0], s=5, c='blue', label='GT')
        plt.scatter(x=mean_rln_coord[1], y=mean_rln_coord[0], s=5, c='whitesmoke', label='Mean')
        plt.scatter(x=weighted_mean_rln_coord[1], y=weighted_mean_rln_coord[0], s=5, c='cyan', label='Weighted Mean')
        plt.legend()
        plt.savefig(os.path.join(SAVE_PATH, '{}-{}-{}.png'.format(patient_id, view_type, item_id)))
        plt.close()
        print(prior_name, dist_mean_gt, dist_weighted_mean_gt, gt_rln_coord, mean_rln_coord, weighted_mean_rln_coord)
        # FIX: the original appended dist_mean_gt in BOTH branches of the NaN
        # check and accumulated NaN into the running sums, so a single failed
        # case poisoned every summary statistic. Skip NaN cases entirely.
        if not np.isnan(dist_mean_gt):
            error_mean += dist_mean_gt
            error_weighted_mean += dist_weighted_mean_gt
            error_arr.append(dist_mean_gt)
            count += 1
        save_list = {'Mean': mean_rln_coord,
                     'Weighted_Mean': weighted_mean_rln_coord,
                     'K': K,
                     'Dice_Threshold': dice_threshold}
        np.save(os.path.join(SAVE_PATH, '{}-{}-{}.npy'.format(patient_id, view_type, item_id)), save_list)
    print(np.mean(error_arr), np.std(error_arr))
    print(np.mean(np.array(error_arr) < 15))
    print("Avg", error_mean/count, error_weighted_mean/count)
| 3,704
| 35.683168
| 119
|
py
|
RLNLocalization
|
RLNLocalization-main/infer.py
|
# =====================================================
# Test the segmentation Network for the three tissues
# =====================================================
from op import args_op as ini_op
from op.run_op import Tester
if __name__ == '__main__':
    # configure the segmentation test run
    args = ini_op.TestParsers("UNetSeg")
    print("Start evaluation {}".format(args.exp_name))
    # run evaluation over the test split
    Tester(args).test()
| 425
| 22.666667
| 55
|
py
|
RLNLocalization
|
RLNLocalization-main/models/__init__.py
|
from models.AutoEncoder.model import U_Net
from models.Regress.model import Locator, GLLocator, MSLocator, MCLocator, SLocator
| 127
| 41.666667
| 83
|
py
|
RLNLocalization
|
RLNLocalization-main/models/Regress/model.py
|
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from utils import tensor2array
from medpy.metric import dc
class conv_block(nn.Module):
"""
Convolution Block
"""
def __init__(self, in_ch, out_ch):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.InstanceNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.InstanceNorm2d(out_ch),
nn.ReLU(inplace=True))
def forward(self, x):
x = self.conv(x)
return x
class up_conv(nn.Module):
"""
Up Convolution Block
"""
def __init__(self, in_ch, out_ch):
super(up_conv, self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.InstanceNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.up(x)
return x
class Locator(nn.Module):
"""
UNet - Basic Implementation
Paper : https://arxiv.org/abs/1505.04597
"""
def __init__(self, in_ch, feat_n, loss_weight):
super(Locator, self).__init__()
filters = [feat_n, feat_n * 2, feat_n * 4, feat_n * 8, feat_n * 16]
self.loss_weight = loss_weight
self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(in_ch, filters[0])
self.Conv2 = conv_block(filters[0], filters[1])
self.Conv3 = conv_block(filters[1], filters[2])
self.Conv4 = conv_block(filters[2], filters[3])
self.Conv5 = conv_block(filters[3], filters[4])
self.ada_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.fc = nn.Sequential(
nn.Linear(1024, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 2)
)
def forward(self, x):
e1 = self.Conv1(x)
e2 = self.Maxpool1(e1)
e2 = self.Conv2(e2)
e3 = self.Maxpool2(e2)
e3 = self.Conv3(e3)
e4 = self.Maxpool3(e3)
e4 = self.Conv4(e4)
e5 = self.Maxpool4(e4)
e5 = self.Conv5(e5)
flat_e5 = self.ada_pool(e5).view(x.size(0), -1)
out = self.fc(flat_e5)
return out
def evaluate(self, x, y):
out = self.forward(x)
out_arr = tensor2array(out, True)
out_arr = np.clip(out_arr, a_min=0, a_max=64)
y_arr = tensor2array(y, True)
dist = np.linalg.norm(out_arr - y_arr)
return dist
def loss_function(self, x, y):
out = self.forward(x)
regression = F.smooth_l1_loss(out, y)
total = regression * self.loss_weight["Regression"]
losses = {
'Total': total.item(),
'Regression': regression.item()
}
return total, losses
class MCLocator(nn.Module):
"""
UNet - Basic Implementation
Paper : https://arxiv.org/abs/1505.04597
"""
def __init__(self, in_ch, feat_n, loss_weight):
super(MCLocator, self).__init__()
filters = [feat_n, feat_n * 2, feat_n * 4, feat_n * 8, feat_n * 16]
self.loss_weight = loss_weight
self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(in_ch, filters[0])
self.Conv2 = conv_block(filters[0], filters[1])
self.Conv3 = conv_block(filters[1], filters[2])
self.Conv4 = conv_block(filters[2]*2, filters[3])
self.Conv5 = nn.Sequential(
conv_block(filters[3], filters[2]),
conv_block(filters[2], filters[2]),
)
self.ada_pool = nn.AdaptiveAvgPool2d(output_size=6)
self.fc = nn.Sequential(
nn.Linear(9216, 512),
nn.ReLU(inplace=True),
nn.Linear(512, 64),
nn.ReLU(inplace=True),
nn.Linear(64, 2)
)
def forward(self, x):
s, l = x
s_f = self.feature_extract(s)
l_f = self.feature_extract(l)
l_f = self.ada_pool(l_f)
f = torch.cat((l_f, s_f), dim=1)
f = self.Conv4(f)
f = self.Conv5(f)
flat_f = torch.flatten(f, start_dim=1)
out = self.fc(flat_f)
return out
def feature_extract(self, x):
e1 = self.Conv1(x)
e2 = self.Maxpool1(e1)
e2 = self.Conv2(e2)
e3 = self.Maxpool2(e2)
e3 = self.Conv3(e3)
return e3
def evaluate(self, x, y):
out = self.forward(x)
out_arr = tensor2array(out, True)
out_arr = np.clip(out_arr, a_min=0, a_max=24)
y_arr = tensor2array(y, True)
dist = np.linalg.norm(out_arr - y_arr)
return dist
def loss_function(self, x, y):
out = self.forward(x)
regression = F.smooth_l1_loss(out, y)
total = regression * self.loss_weight["Regression"]
losses = {
'Total': total.item(),
'Regression': regression.item()
}
return total, losses
| 5,554
| 26.775
| 85
|
py
|
RLNLocalization
|
RLNLocalization-main/models/Regress/__init__.py
| 0
| 0
| 0
|
py
|
|
RLNLocalization
|
RLNLocalization-main/models/AutoEncoder/model.py
|
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from utils import tensor2array
from medpy.metric import dc
class conv_block(nn.Module):
"""
Convolution Block
"""
def __init__(self, in_ch, out_ch):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.InstanceNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
nn.InstanceNorm2d(out_ch),
nn.ReLU(inplace=True))
def forward(self, x):
x = self.conv(x)
return x
class up_conv(nn.Module):
    """2x upsampling followed by a single (Conv3x3 -> InstanceNorm -> ReLU) stage."""
    def __init__(self, in_ch, out_ch):
        super(up_conv, self).__init__()
        steps = [
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.InstanceNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        self.up = nn.Sequential(*steps)
    def forward(self, x):
        return self.up(x)
class U_Net(nn.Module):
    """
    UNet - Basic Implementation
    Paper : https://arxiv.org/abs/1505.04597

    Encoder halves resolution / doubles channels five times; the decoder
    mirrors it with skip connections. Attribute names are kept exactly as
    before so existing checkpoints remain loadable.
    """
    def __init__(self, in_ch, out_ch, feat_n, loss_weight):
        super(U_Net, self).__init__()
        # Channel widths per level: feat_n, 2f, 4f, 8f, 16f.
        widths = [feat_n * (2 ** level) for level in range(5)]
        self.loss_weight = loss_weight
        # One pooling module per encoder transition (separate attributes for
        # state-dict compatibility).
        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Encoder path.
        self.Conv1 = conv_block(in_ch, widths[0])
        self.Conv2 = conv_block(widths[0], widths[1])
        self.Conv3 = conv_block(widths[1], widths[2])
        self.Conv4 = conv_block(widths[2], widths[3])
        self.Conv5 = conv_block(widths[3], widths[4])
        # Decoder path: upsample, concat skip, convolve.
        self.Up5 = up_conv(widths[4], widths[3])
        self.Up_conv5 = conv_block(widths[4], widths[3])
        self.Up4 = up_conv(widths[3], widths[2])
        self.Up_conv4 = conv_block(widths[3], widths[2])
        self.Up3 = up_conv(widths[2], widths[1])
        self.Up_conv3 = conv_block(widths[2], widths[1])
        self.Up2 = up_conv(widths[1], widths[0])
        self.Up_conv2 = conv_block(widths[1], widths[0])
        # Final 1x1 projection to class logits.
        self.Conv = nn.Conv2d(widths[0], out_ch, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """Encode, then decode with skip connections; returns raw logits."""
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))
        dec = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec = self.Up_conv4(torch.cat((enc3, self.Up4(dec)), dim=1))
        dec = self.Up_conv3(torch.cat((enc2, self.Up3(dec)), dim=1))
        dec = self.Up_conv2(torch.cat((enc1, self.Up2(dec)), dim=1))
        return self.Conv(dec)
    def evaluate(self, x, y):
        """Dice coefficient between the arg-maxed prediction and the target mask."""
        logits = tensor2array(self.forward(x), True)
        prediction = np.argmax(logits, axis=0)
        target = tensor2array(y, True)
        return dc(prediction, target)
    def loss_function(self, x, y):
        """Weighted cross-entropy loss plus a dict of scalar values for logging."""
        logits = self.forward(x)
        ce_loss = F.cross_entropy(logits, y)
        weighted_total = ce_loss * self.loss_weight["CrossEntropy"]
        scalar_losses = {
            'Total': weighted_total.item(),
            'CrossEntropy': ce_loss.item()
        }
        return weighted_total, scalar_losses
| 3,901
| 27.071942
| 85
|
py
|
RLNLocalization
|
RLNLocalization-main/models/AutoEncoder/__init__.py
| 0
| 0
| 0
|
py
|
|
RLNLocalization
|
RLNLocalization-main/op/model_op.py
|
from models import U_Net, Locator, MCLocator
def load_model(args):
    """Instantiate the network selected by ``args.exp_name``.

    The experiment name acts as a dispatcher: a name containing "UNet"
    builds a segmentation U-Net, "Locate-v1" a single-scale Locator, and
    "Locate-v2" a multi-crop MCLocator.

    Raises:
        NameError: if no known model tag appears in ``args.exp_name``
            (type kept for backward compatibility with existing callers).
    """
    if "UNet" in args.exp_name:
        return U_Net(
            in_ch=args.in_dim,
            out_ch=args.out_dim,
            feat_n=args.feat_n,
            loss_weight=args.loss_weight
        )
    if "Locate-v1" in args.exp_name:
        return Locator(
            in_ch=args.in_dim,
            feat_n=args.feat_n,
            loss_weight=args.loss_weight
        )
    if "Locate-v2" in args.exp_name:
        return MCLocator(
            in_ch=args.in_dim,
            feat_n=args.feat_n,
            loss_weight=args.loss_weight
        )
    # Fail loudly with the offending name instead of a bare NameError.
    raise NameError("unrecognized experiment name: {}".format(args.exp_name))
| 681
| 23.357143
| 44
|
py
|
RLNLocalization
|
RLNLocalization-main/op/args_op.py
|
import os
class BaseParsers(object):
    """Shared experiment configuration: output paths, model dims, loss weights."""
    def __init__(self, exp_name):
        self.exp_name = exp_name
        # All per-experiment outputs live under ../Exps/<exp_name>/.
        exp_root = os.path.join("../Exps", exp_name)
        self.log_path = os.path.join(exp_root, "logs")
        self.ckpt_path = os.path.join(exp_root, "ckpts")
        self.save_path = os.path.join(exp_root, "save")
        self.text_path = "Text"
        self.view = ['left']
        self.gpu_id = -1          # presumably -1 selects CPU — confirm in utils.set_device
        self.init_path = None     # optional warm-start checkpoint path
        # Model geometry.
        self.in_dim = 1
        self.out_dim = 4
        self.feat_n = 64
        # Per-term loss scaling used by the models' loss_function.
        self.loss_weight = {
            'CrossEntropy': 1,
            'Regression': 1
        }
class TrainParsers(BaseParsers):
    """Hyper-parameters for segmentation training runs."""
    def __init__(self, exp_name):
        super(TrainParsers, self).__init__(exp_name)
        # Optimiser configuration.
        self.lr = 3e-4
        self.weight_decay = 5e-4
        self.gamma = 0.95          # per-epoch LR decay factor
        self.batch_size = 16
        # Run schedule and bookkeeping.
        self.num_epochs = 100
        self.start_epoch = 1
        self.reuse = 0             # checkpoint id/path to resume from; 0 = fresh run
        self.print_freq = 10
        self.val_id = 30           # presumably the epoch validation kicks in — confirm in caller
class RefineParsers(BaseParsers):
    """Hyper-parameters for localization-refinement training runs."""
    def __init__(self, exp_name):
        super(RefineParsers, self).__init__(exp_name)
        # Optimiser configuration (higher LR than segmentation training).
        self.lr = 1e-3
        self.weight_decay = 5e-4
        self.gamma = 0.95          # per-epoch LR decay factor
        self.batch_size = 16
        # Run schedule and bookkeeping.
        self.num_epochs = 100
        self.start_epoch = 1
        self.reuse = 0             # checkpoint id/path to resume from; 0 = fresh run
        self.print_freq = 10
        self.val_id = 30           # presumably the epoch validation kicks in — confirm in caller
class TestParsers(BaseParsers):
    """Settings for evaluating a fully trained segmentation checkpoint."""
    def __init__(self, exp_name):
        super(TestParsers, self).__init__(exp_name)
        # Evaluate the final (epoch-100) checkpoint.
        self.weight_path = "../Exps/{}/ckpts/ckpt-100.pth".format(exp_name)
        self.results_path = "Results"
class RefineTestParsers(BaseParsers):
    """Settings for evaluating the best localization-refinement checkpoint."""
    def __init__(self, exp_name):
        super(RefineTestParsers, self).__init__(exp_name)
        # Evaluate the best-validation checkpoint.
        self.weight_path = "../Exps/{}/ckpts/ckpt-best.pth".format(exp_name)
        self.results_path = "Results_Locate"
| 1,830
| 25.157143
| 76
|
py
|
RLNLocalization
|
RLNLocalization-main/op/data_op.py
|
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.ndimage import center_of_mass
from random import uniform
from torch.utils.data import Dataset
from torchvision import transforms as T
from torchvision.transforms.functional import crop, to_tensor
def load_list(text_path, view):
    """Read train/val/test sample lists for the requested view(s).

    Parameters
    ----------
    text_path : str
        Directory containing the ``{split}_{L|R}-RLN.txt`` list files.
    view : iterable of str
        Any combination of 'left' and 'right'; matching lists are
        concatenated, left before right.

    Returns
    -------
    tuple of list of str
        (train_list, val_list, test_list) with newlines stripped.
    """
    def _read_text(path):
        # Fix: close the file handle deterministically instead of leaking it.
        with open(path, 'r') as handle:
            return [line.replace('\n', '') for line in handle]

    train_list, val_list, test_list = [], [], []
    # 'left' maps to the L-RLN files, 'right' to the R-RLN files.
    for name, side in (('left', 'L'), ('right', 'R')):
        if name in view:
            train_list += _read_text(os.path.join(text_path, "train_{}-RLN.txt".format(side)))
            val_list += _read_text(os.path.join(text_path, "val_{}-RLN.txt".format(side)))
            test_list += _read_text(os.path.join(text_path, "test_{}-RLN.txt".format(side)))
    return train_list, val_list, test_list
class RLNDataset(Dataset):
    """Segmentation dataset: grayscale ultrasound image plus a combined
    3-class organ mask (CCA=1, thyroid=2, trachea=3, background=0)."""
    def __init__(self, data_list):
        # data_list: paths to sample directories containing IMG/ and MASK/.
        self.data_list = data_list
        self.img_transform = T.Compose([
            T.Resize((256, 256), Image.BILINEAR),
            T.ToTensor(),
        ])
        self.mask_transform = T.Compose([
            T.Resize((256, 256), Image.NEAREST),
        ])
    def __getitem__(self, item):
        sample_dir = self.data_list[item].replace('\\', '/')
        image_file = os.listdir(os.path.join(sample_dir, "IMG"))[0]
        sample_name = '{}-{}-{}'.format(*sample_dir.split('/')[3:])
        grayscale = Image.open(os.path.join(sample_dir, "IMG", image_file)).convert('L')
        img_tensor = self.img_transform(grayscale)
        # Build one label map by stacking per-organ masks scaled to their
        # class index and taking the element-wise maximum.
        per_organ = []
        for label, organ in enumerate(["CCA", "thyroid", "trachea"], start=1):
            organ_mask = Image.open(os.path.join(sample_dir, "MASK", "{}.jpg".format(organ)))
            organ_mask = self.mask_transform(organ_mask)
            organ_mask = np.array(organ_mask) / 255 * label
            per_organ.append(torch.from_numpy(organ_mask).long())
        combined = torch.max(torch.stack(per_organ, dim=0), dim=0)[0]
        return img_tensor, combined, sample_name
    def __len__(self):
        return len(self.data_list)
class RLNRefineDataset(Dataset):
    """Localization dataset for refinement training.

    Each item yields two crops around the ground-truth RLN center of mass
    (24x24 small, 64x64 large), jittered by a random shift of up to ±20 px,
    plus the coordinates of the true center inside the large crop.
    """
    def __init__(self, data_list):
        # data_list: paths to sample directories containing IMG/ and MASK/.
        self.data_list = data_list
        self.img_transform = T.Compose([
            T.Resize((256, 256), Image.BILINEAR),
            # T.ToTensor(),
        ])
        self.mask_transform = T.Compose([
            T.Resize((256, 256), Image.NEAREST),
        ])
    def __getitem__(self, item):
        item_path = self.data_list[item].replace('\\', '/')
        img_name = os.listdir(os.path.join(item_path, "IMG"))[0]
        img_path = os.path.join(item_path, "IMG", img_name)
        # Sample id built from the last three path components.
        item_name = '{}-{}-{}'.format(*item_path.split('/')[3:])
        img = Image.open(img_path).convert('L')
        img = self.img_transform(img)
        # Ground-truth RLN location = center of mass of the RLN mask.
        rln_path = os.path.join(item_path, "MASK", "RLN.jpg")
        rln_msk = Image.open(rln_path).convert('L')
        rln_msk = self.mask_transform(rln_msk)
        rln_msk_arr = np.array(rln_msk)
        rln_coord = list(center_of_mass(rln_msk_arr))
        # ==================== Single Crop =================================== #
        # top, left = rln_coord[0] - 32, rln_coord[1] - 32 # patch size is 64
        #
        # cropped_center_h, cropped_center_w = 32, 32
        # height, width = 64, 64
        #
        # random_shift_h, random_shift_w = uniform(-15, 15), uniform(-15, 15)
        #
        # top += random_shift_h
        # left += random_shift_w
        #
        # cropped_center_h -= random_shift_h
        # cropped_center_w -= random_shift_w
        #
        # cropped_img = crop(img, top=top, left=left, height=height, width=width)
        # ==================== Single Crop =================================== #
        # ==================== Multi Crop =================================== #
        # Target is the true center expressed in large-crop (64x64) coordinates:
        # shifting the crop window by (dh, dw) moves the center by (-dh, -dw).
        cropped_center_h, cropped_center_w = 32, 32
        random_shift_h, random_shift_w = uniform(-20, 20), uniform(-20, 20)
        cropped_center_h -= random_shift_h
        cropped_center_w -= random_shift_w
        # Small 24x24 crop around the (shifted) RLN center.
        cropped_img_s = crop(
            img,
            top=rln_coord[0] + random_shift_h - 12,
            left=rln_coord[1] + random_shift_w - 12,
            height=24, width=24
        )
        # cropped_img_m = crop(
        #     img,
        #     top=rln_coord[0] + random_shift_h - 16,
        #     left=rln_coord[1] + random_shift_w - 16,
        #     height=32, width=32
        # )
        # Large 64x64 crop around the same shifted center.
        cropped_img_l = crop(
            img,
            top=rln_coord[0] + random_shift_h - 32,
            left=rln_coord[1] + random_shift_w - 32,
            height=64, width=64
        )
        # ==================== Multi Crop =================================== #
        # plt.figure()
        # plt.subplot(1, 2, 1)
        # plt.imshow(cropped_img, cmap='gray')
        # plt.scatter(x=cropped_center_w, y=cropped_center_h, s=5, c='cyan')
        # plt.subplot(1, 2, 2)
        # plt.imshow(img, cmap='gray')
        # plt.scatter(x=rln_coord[1], y=rln_coord[0], s=5, c='cyan')
        # plt.show()
        # img_tensor = to_tensor(img.convert('RGB'))
        cropped_img_s_tensor = to_tensor(cropped_img_s)
        # cropped_img_m_tensor = to_tensor(cropped_img_m)
        cropped_img_l_tensor = to_tensor(cropped_img_l)
        center_coord = torch.from_numpy(np.array([cropped_center_h, cropped_center_w])).float()
        return [cropped_img_s_tensor, cropped_img_l_tensor], center_coord, item_name
    def __len__(self):
        return len(self.data_list)
class RLNRriorDataset(Dataset):
    """Localization dataset for refinement *testing*.

    Crops are centered on a prior prediction ('Weighted_Mean' loaded from
    Prior_Results/<id>.npy) instead of the ground truth. Returns the full
    RGB image, the two crops, the true RLN center relative to the large
    crop, and the crop offset (top, left) for mapping predictions back to
    image coordinates.
    NOTE(review): "Rrior" looks like a typo for "Prior"; kept because callers
    reference this class name.
    """
    def __init__(self, data_list):
        # data_list: paths to sample directories containing IMG/ and MASK/.
        self.data_list = data_list
        self.img_transform = T.Compose([
            T.Resize((256, 256), Image.BILINEAR),
            # T.ToTensor(),
        ])
        self.mask_transform = T.Compose([
            T.Resize((256, 256), Image.NEAREST),
        ])
    def __getitem__(self, item):
        item_path = self.data_list[item].replace('\\', '/')
        img_name = os.listdir(os.path.join(item_path, "IMG"))[0]
        img_path = os.path.join(item_path, "IMG", img_name)
        # Sample id built from the last three path components.
        item_name = '{}-{}-{}'.format(*item_path.split('/')[3:])
        # Prior localization result produced by an earlier stage.
        prior_result_path = os.path.join('Prior_Results', '{}-{}-{}.npy'.format(*item_path.split('/')[3:]))
        prior_result = np.load(prior_result_path, allow_pickle=True).item()
        init_coord = prior_result['Weighted_Mean']
        img = Image.open(img_path).convert('L')
        img = self.img_transform(img)
        # Ground-truth RLN location = center of mass of the RLN mask.
        rln_path = os.path.join(item_path, "MASK", "RLN.jpg")
        rln_msk = Image.open(rln_path).convert('L')
        rln_msk = self.mask_transform(rln_msk)
        rln_msk_arr = np.array(rln_msk)
        rln_coord = list(center_of_mass(rln_msk_arr))
        # ==================== Single Crop =================================== #
        # top, left = init_coord[0] - 32, init_coord[1] - 32 # patch size is 64
        # height, width = 64, 64
        # cropped_rln_coord = [rln_coord[0] - top, rln_coord[1] - left]
        # cropped_img = crop(img, top=top, left=left, height=height, width=width)
        # ==================== Single Crop =================================== #
        # ==================== Multi Crop =================================== #
        # True center expressed in large-crop coordinates.
        top, left = init_coord[0] - 32, init_coord[1] - 32
        cropped_rln_coord = [rln_coord[0] - top, rln_coord[1] - left]
        # Small 24x24 crop around the prior prediction.
        cropped_img_s = crop(
            img,
            top=init_coord[0] - 12,
            left=init_coord[1] - 12,
            height=24, width=24
        )
        # cropped_img_m = crop(
        #     img,
        #     top=init_coord[0] - 16,
        #     left=init_coord[1] - 16,
        #     height=32, width=32
        # )
        # Large 64x64 crop around the prior prediction.
        cropped_img_l = crop(
            img,
            top=init_coord[0] - 32,
            left=init_coord[1] - 32,
            height=64, width=64
        )
        # ==================== Multi Crop =================================== #
        # plt.figure()
        # plt.subplot(1, 2, 1)
        # plt.imshow(cropped_img, cmap='gray')
        # plt.scatter(x=cropped_rln_coord[1], y=cropped_rln_coord[0], s=5, c='cyan')
        # plt.scatter(x=32, y=32, s=5, c='red')
        # plt.subplot(1, 2, 2)
        # plt.imshow(img, cmap='gray')
        # plt.scatter(x=rln_coord[1], y=rln_coord[0], s=5, c='cyan')
        # plt.show()
        img_tensor = to_tensor(img.convert('RGB'))
        # cropped_img_tensor = to_tensor(cropped_img)
        cropped_img_s_tensor = to_tensor(cropped_img_s)
        # cropped_img_m_tensor = to_tensor(cropped_img_m)
        cropped_img_l_tensor = to_tensor(cropped_img_l)
        center_coord = torch.from_numpy(np.array(cropped_rln_coord))
        move_coord = torch.from_numpy(np.array([top, left]))
        return img_tensor, [cropped_img_s_tensor, cropped_img_l_tensor], center_coord, move_coord, item_name
    def __len__(self):
        return len(self.data_list)
| 9,157
| 36.076923
| 108
|
py
|
RLNLocalization
|
RLNLocalization-main/op/run_op.py
|
import os
import torch
from PIL import Image
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from op.data_op import RLNDataset, RLNRefineDataset, RLNRriorDataset
from utils import Recorder, set_device, tensor2array
import os
import torch
import numpy as np
from medpy.metric import dc
from datetime import datetime
from op.model_op import load_model
from op.data_op import load_list
from utils import check_dir, Plotter
class Baser(object):
    """Shared experiment scaffolding: output directories, data lists, device,
    model construction, optional warm start, and a time-stamped text log."""
    def __init__(self, args):
        self.log_path = args.log_path
        self.ckpt_path = args.ckpt_path
        self.save_path = args.save_path
        self.view = args.view
        # Create output directories if they do not yet exist.
        check_dir(self.log_path)
        check_dir(self.ckpt_path)
        check_dir(self.save_path)
        self.train_list, self.val_list, self.test_list = load_list(args.text_path, args.view)
        self.device = set_device(args.gpu_id)
        self.model = load_model(args).to(self.device)
        # Optionally warm-start the model from a checkpoint.
        if args.init_path:
            checkpoint = torch.load(args.init_path, map_location=self.device)
            print("Load from", args.init_path)
            self.model.load_state_dict(checkpoint["model"])
        # Time-stamped text log, created empty up front.
        now = datetime.now()
        self.log = os.path.join(self.log_path, now.strftime("%m-%d-%Y-%H-%M-%S") + ".txt")
        open(self.log, "w+").close()
        self.plotter = Plotter(self.log_path)
class Trainer(Baser):
    """Training/validation loop for the segmentation model (cross-entropy)."""
    def __init__(self, args):
        super(Trainer, self).__init__(args)
        train_set = RLNDataset(
            self.train_list)
        val_set = RLNDataset(
            self.val_list)
        self.train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=0)
        self.val_loader = DataLoader(val_set, batch_size=1, shuffle=False)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=args.lr, weight_decay=args.weight_decay)
        self.recorder = Recorder(["Total", "CrossEntropy"])
        self.epoch_count = args.start_epoch
        self.args = args
        # Decay the LR by `gamma` after every epoch (step size 1).
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1, gamma=args.gamma)
        if args.reuse:
            self.load_weight(args.reuse)
    def load_weight(self, reuse):
        """Resume model + optimizer state from an epoch number, "best", or a path."""
        if isinstance(reuse, int) or reuse == "best":
            weight_path = os.path.join(self.ckpt_path, "ckpt-{}.pth".format(reuse))
        elif isinstance(reuse, str):
            weight_path = reuse
        else:
            raise NameError
        checkpoint = torch.load(weight_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model"])
        self.optimizer.load_state_dict(checkpoint["optimizer"])
        print("Load weight with {}".format(weight_path))
    def val(self):
        """Average the model's evaluation metric over the validation set."""
        print("Evaluating...")
        self.model.eval()
        running_metric = 0.0
        count = 0
        for idx, (x, y, subj_id) in enumerate(self.val_loader):
            with torch.no_grad():
                metric = self.model.evaluate(x.to(self.device), y.to(self.device))
                running_metric += metric
                count += 1
        running_metric /= count
        self.plotter.update({"val_metric": running_metric})
        self.plotter.send()
        return running_metric
    def train(self):
        """Run one training epoch; returns the running total loss."""
        self.model.train()
        self.recorder.reset()
        for idx, (x, y, subj_id) in enumerate(self.train_loader):
            loss, loss_info = self.model.loss_function(x.to(self.device), y.to(self.device))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.recorder.update({
                "Total": loss_info["Total"],
                "CrossEntropy": loss_info["CrossEntropy"],
            })
            # Periodically print/log/plot the running losses.
            if idx % self.args.print_freq == 0 and idx != 0:
                info = "Epoch {} Batch {} Total Loss: {:.4f} CrossEntropy Loss: {:.4f} ".format(
                    self.epoch_count, idx,
                    self.recorder.call("Total"), self.recorder.call("CrossEntropy"))
                print(info)
                open(self.log, "a+").write(info + "\n")
                self.plotter.update({
                    "TotalLoss": self.recorder.call("Total"),
                    "CrossEntropyLoss": self.recorder.call("CrossEntropy"),
                })
                self.plotter.send()
        self.scheduler.step()
        return self.recorder.call("Total")
    def update_count(self, count_num=1):
        # Advance the externally driven epoch counter.
        self.epoch_count += count_num
    def save_weight(self, attr):
        """Save model + optimizer state as ckpt-<attr>.pth."""
        weight_dict = dict()
        weight_dict["model"] = self.model.state_dict()
        weight_dict["optimizer"] = self.optimizer.state_dict()
        torch.save(weight_dict, os.path.join(self.ckpt_path, "ckpt-{}.pth".format(attr)))
        print("Saving model to {}".format(os.path.join(self.ckpt_path, "ckpt-{}.pth".format(attr))))
class Tester(Baser):
    """Segmentation inference on the test split: saves prediction images,
    side-by-side plots, and reports per-class Dice."""
    def __init__(self, args):
        super(Tester, self).__init__(args)
        test_set = RLNDataset(self.test_list)
        self.test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
        self.results_path = args.results_path
        check_dir(self.results_path)
        self.args = args
        checkpoint = torch.load(args.weight_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model"])
        print("Load weight with {}".format(args.weight_path))
    def test(self):
        """Evaluate every test sample and print the average Dice per class."""
        print("Evaluating...")
        self.model.eval()
        # Rows: one per organ class (CCA, thyroid, trachea).
        metric_arr = np.zeros((3, len(self.test_loader)), dtype=np.float64)
        for idx, (x, y, subj_id) in enumerate(self.test_loader):
            with torch.no_grad():
                out = self.model(x.to(self.device))
            img = tensor2array(x, True) * 255
            out = tensor2array(out, True)
            out = np.argmax(out, axis=0).astype(np.int32)
            # Save the predicted label map (x50 so class ids are visible).
            pil_out = Image.fromarray(out * 50).convert('L')
            pil_out.save(os.path.join(self.results_path, "{}.png".format(subj_id[0])))
            y = tensor2array(y, True)
            metric_arr[:, idx] = np.array([
                dc(out == 1, y == 1),
                dc(out == 2, y == 2),
                dc(out == 3, y == 3)
            ])
            # Side-by-side figure: input / prediction / ground truth.
            plt.figure()
            plt.subplot(1, 3, 1)
            plt.imshow(img, cmap="gray")
            plt.title("Img")
            plt.xticks([])
            plt.yticks([])
            plt.subplot(1, 3, 2)
            plt.imshow(out)
            plt.title("Out")
            plt.xticks([])
            plt.yticks([])
            plt.subplot(1, 3, 3)
            plt.imshow(y)
            plt.title("Msk")
            plt.xticks([])
            plt.yticks([])
            # plt.show()
            plt.savefig(os.path.join(self.save_path, "{}.png".format(subj_id[0])))
            plt.close()
            print(subj_id, metric_arr[:, idx])
        avg_metric = np.mean(metric_arr, axis=-1)
        print("CCA\tthyroid\ttrachea")
        print("{:.3f}\t{:.3f}\t{:.3f}".format(*avg_metric.tolist()))
class Refiner(Baser):
    """Training/validation loop for the localization refinement model
    (smooth-L1 regression on multi-crop inputs)."""
    def __init__(self, args):
        super(Refiner, self).__init__(args)
        train_set = RLNRefineDataset(
            self.train_list)
        val_set = RLNRefineDataset(
            self.val_list)
        self.train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=0)
        self.val_loader = DataLoader(val_set, batch_size=1, shuffle=False)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=args.lr, weight_decay=args.weight_decay)
        self.recorder = Recorder(["Total", "Regression"])
        self.epoch_count = args.start_epoch
        self.args = args
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1, gamma=args.gamma)  # todo useless
        if args.reuse:
            self.load_weight(args.reuse)
    def load_weight(self, reuse):
        """Resume model + optimizer state from an epoch number, "best", or a path."""
        if isinstance(reuse, int) or reuse == "best":
            weight_path = os.path.join(self.ckpt_path, "ckpt-{}.pth".format(reuse))
        elif isinstance(reuse, str):
            weight_path = reuse
        else:
            raise NameError
        checkpoint = torch.load(weight_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model"])
        self.optimizer.load_state_dict(checkpoint["optimizer"])
        print("Load weight with {}".format(weight_path))
    def val(self):
        """Average the model's metric (L2 distance) over the validation set."""
        print("Evaluating...")
        self.model.eval()
        running_metric = 0.0
        count = 0
        # xs is a list of crops (small, large); move each to the device.
        for idx, (xs, y, subj_id) in enumerate(self.val_loader):
            with torch.no_grad():
                metric = self.model.evaluate([x.to(self.device) for x in xs], y.to(self.device))
                running_metric += metric
                count += 1
        running_metric /= count
        self.plotter.update({"val_metric": running_metric})
        self.plotter.send()
        return running_metric
    def train(self):
        """Run one training epoch; returns the running total loss."""
        self.model.train()
        self.recorder.reset()
        for idx, (xs, y, subj_id) in enumerate(self.train_loader):
            loss, loss_info = self.model.loss_function([x.to(self.device) for x in xs], y.to(self.device))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.recorder.update({
                "Total": loss_info["Total"],
                "Regression": loss_info["Regression"],
            })
            # Periodically print/log/plot the running losses.
            if idx % self.args.print_freq == 0 and idx != 0:
                info = "Epoch {} Batch {} Total Loss: {:.4f} Regression Loss: {:.4f} ".format(
                    self.epoch_count, idx,
                    self.recorder.call("Total"), self.recorder.call("Regression"))
                print(info)
                open(self.log, "a+").write(info + "\n")
                self.plotter.update({
                    "TotalLoss": self.recorder.call("Total"),
                    "RegressionLoss": self.recorder.call("Regression"),
                })
                self.plotter.send()
        self.scheduler.step()
        return self.recorder.call("Total")
    def update_count(self, count_num=1):
        # Advance the externally driven epoch counter.
        self.epoch_count += count_num
    def save_weight(self, attr):
        """Save model + optimizer state as ckpt-<attr>.pth."""
        weight_dict = dict()
        weight_dict["model"] = self.model.state_dict()
        weight_dict["optimizer"] = self.optimizer.state_dict()
        torch.save(weight_dict, os.path.join(self.ckpt_path, "ckpt-{}.pth".format(attr)))
        print("Saving model to {}".format(os.path.join(self.ckpt_path, "ckpt-{}.pth".format(attr))))
class RefineTester(Baser):
    """Inference for the refinement model, using prior-based crops; reports
    mean/std L2 distance and the hit rate within 15 pixels."""
    def __init__(self, args):
        super(RefineTester, self).__init__(args)
        test_set = RLNRriorDataset(self.test_list)
        self.test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
        self.results_path = args.results_path
        check_dir(self.results_path)
        self.args = args
        checkpoint = torch.load(args.weight_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model"])
        print("Load weight with {}".format(args.weight_path))
    def test(self):
        """Predict each test sample, plot prediction vs. ground truth on the
        full image, and accumulate L2 distances."""
        print("Evaluating...")
        self.model.eval()
        metric_arr = np.zeros((1, len(self.test_loader)), dtype=np.float64)
        for idx, (img, xs, y, move, subj_id) in enumerate(self.test_loader):
            with torch.no_grad():
                out = self.model([x.to(self.device) for x in xs])
            img = tensor2array(img, True)
            out = tensor2array(out, True)
            out = np.clip(out, a_min=0, a_max=64)
            y = tensor2array(y, True)
            move = tensor2array(move, True)
            # Map crop-relative coordinates back to full-image coordinates.
            y += move
            out += move
            metric_arr[:, idx] = np.linalg.norm(y - out)
            # plt.figure()
            # plt.imshow(img, cmap="gray")
            # plt.scatter(x=out[1], y=out[0], s=5, c='cyan', label='out')
            # plt.scatter(x=y[1], y=y[0], s=5, c='red', label='y')
            # plt.title("Cropped Img")
            # plt.legend()
            # plt.text(55, 15, '{:.3f}'.format(np.linalg.norm(y - out)))
            # # plt.show()
            # plt.savefig(os.path.join(self.save_path, "{}.png".format(subj_id[0])))
            # plt.close()
            # print(subj_id, metric_arr[:, idx])
            plt.figure()
            plt.imshow(img[0], cmap="gray")
            plt.scatter(x=out[1], y=out[0], s=30, c='cyan', label='prediction')
            plt.scatter(x=y[1], y=y[0], s=30, c='red', label='ground truth')
            # plt.title("Cropped Img")
            plt.legend(fontsize=18)
            plt.text(55, 15, '{:.3f}'.format(np.linalg.norm(y - out)))
            plt.axis('off')
            # plt.show()
            plt.savefig(os.path.join(self.save_path, "{}.png".format(subj_id[0])), dpi=300, bbox_inches='tight',
                        pad_inches=0.0)
            plt.close()
            print(subj_id, metric_arr[:, idx])
        avg_metric = np.mean(metric_arr, axis=-1)
        std_metric = np.std(metric_arr, axis=-1)
        print("L2 Dist")
        print("{:.4f}-{:.4f}".format(float(avg_metric), float(std_metric)))
        print("Hit 15")
        print("{:.3f}".format(np.mean(metric_arr < 15)))
| 13,171
| 34.6
| 112
|
py
|
RLNLocalization
|
RLNLocalization-main/op/__init__.py
| 0
| 0
| 0
|
py
|
|
metfaces-dataset
|
metfaces-dataset-master/metfaces.py
|
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import numpy as np
import os
import PIL.Image
import scipy.ndimage
from tqdm import tqdm
_examples = '''examples:
# Run x, y, z
python %(prog)s --output=tmp
'''
def extract_face(face, source_images, output_dir, rng, target_size=1024, supersampling=4, enable_padding=True, random_shift=0.0, retry_crops=False, rotate_level=True):
    """Crop, align and save one face from a source image.

    An oriented crop rectangle is derived from 68 facial landmarks,
    optionally jittered by `random_shift`; the image is then shrunk,
    cropped, reflect-padded with blur feathering, quad-transformed at
    `supersampling`x resolution, and written to `output_dir` as
    "<obj_id>-<face_idx>.png" at `target_size` x `target_size`.
    NOTE(review): PIL.Image.ANTIALIAS is removed in Pillow 10 — use
    Image.LANCZOS on newer Pillow versions.
    """
    def rot90(v) -> np.ndarray:
        # Rotate a 2-D vector by 90 degrees counter-clockwise.
        return np.array([-v[1], v[0]])
    # Sanitize facial landmarks.
    face_spec = face['face_spec']
    # Undo the detection-time shrink and move to pixel centers.
    landmarks = (np.float32(face_spec['landmarks']) + 0.5) * face_spec['shrink']
    assert landmarks.shape == (68, 2)
    lm_eye_left = landmarks[36 : 42] # left-clockwise
    lm_eye_right = landmarks[42 : 48] # left-clockwise
    lm_mouth_outer = landmarks[48 : 60] # left-clockwise
    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg
    # Choose oriented crop rectangle.
    if rotate_level:
        # Orient according to tilt of the input image
        x = eye_to_eye - rot90(eye_to_mouth)
        x /= np.hypot(*x)
        x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
        y = rot90(x)
        c0 = eye_avg + eye_to_mouth * 0.1
    else:
        # Do not match the tilt in the source data, i.e., use an axis-aligned rectangle
        x = np.array([1, 0], dtype=np.float64)
        x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
        y = np.flipud(x) * [-1, 1]
        c0 = eye_avg + eye_to_mouth * 0.1
    # Load.
    img = PIL.Image.open(os.path.join(source_images, face['source_path'])).convert('RGB')
    # Calculate auxiliary data.
    qsize = np.hypot(*x) * 2
    quad = np.stack([c0 - x - y, c0 - x + y, c0 + x + y, c0 + x - y])
    # Keep drawing new random crop offsets until we find one that is contained in the image
    # and does not require padding
    if random_shift != 0:
        for _ in range(1000):
            # Offset the crop rectange center by a random shift proportional to image dimension
            # and the requested standard deviation (by default 0)
            c = (c0 + np.hypot(*x)*2 * random_shift * rng.normal(0, 1, c0.shape))
            quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
            crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
            if not retry_crops or not (crop[0] < 0 or crop[1] < 0 or crop[2] >= img.width or crop[3] >= img.height):
                # We're happy with this crop (either it fits within the image, or retries are disabled)
                break
        else:
            # rejected N times, give up and move to next image
            # (does not happen in practice with the MetFaces data)
            print('rejected image %s' % face['source_path'])
            return
    # Shrink.
    shrink = int(np.floor(qsize / target_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink
    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]
    # Pad.
    pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        # Feather the reflected border with a blur + median blend.
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]
    # Transform.
    super_size = target_size * supersampling
    img = img.transform((super_size, super_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if target_size < super_size:
        img = img.resize((target_size, target_size), PIL.Image.ANTIALIAS)
    # Save face image.
    img.save(os.path.join(output_dir, f"{face['obj_id']}-{face['face_idx']:02d}.png"))
def main():
    """Command-line entry point: parse arguments and extract every face
    listed in the metadata json into the output directory."""
    parser = argparse.ArgumentParser(
        description='MetFaces dataset processing tool',
        epilog=_examples,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--json', help='MetFaces metadata json file path', required=True)
    parser.add_argument('--source-images', help='Location of MetFaces raw image data', required=True)
    # Fix: --output-dir was optional but is dereferenced unconditionally below,
    # which crashed with a TypeError when omitted; make it required like the
    # other path arguments so argparse reports a clean usage error instead.
    parser.add_argument('--output-dir', help='Where to save output files', required=True)
    parser.add_argument('--random-shift', help='Standard deviation of random crop rectangle jitter', type=float, default=0.0, metavar='SHIFT')
    parser.add_argument('--retry-crops', help='Retry random shift if crop rectangle falls outside image (up to 1000 times)', dest='retry_crops', default=False, action='store_true')
    parser.add_argument('--no-rotation', help='Keep the original orientation of images', dest='no_rotation', default=False, action='store_true')
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    rng = np.random.RandomState(12345)  # fix the random seed for reproducibility
    with open(args.json, encoding="utf8") as fin:
        faces = json.load(fin)
    for f in tqdm(faces):
        extract_face(f, source_images=args.source_images, output_dir=args.output_dir, rng=rng,
                     random_shift=args.random_shift, retry_crops=args.retry_crops, rotate_level=not args.no_rotation)
if __name__ == "__main__":
    main()
| 7,389
| 45.772152
| 180
|
py
|
Wasserstein2Barycenters
|
Wasserstein2Barycenters-main/src/distributions.py
|
import torch
import numpy as np
from scipy.linalg import sqrtm
import sklearn.datasets
import random
def symmetrize(X):
    """Return the symmetric part of a square matrix, with any imaginary
    component (e.g. from scipy.linalg.sqrtm) discarded."""
    return np.real(0.5 * (X + X.T))
class Sampler:
    """Base class for distribution samplers that produce torch tensors on a
    configurable device; subclasses implement `sample`."""
    def __init__(
        self, device='cuda',
        requires_grad=False,
    ):
        self.device = device
        self.requires_grad = requires_grad
    def sample(self, batch_size=5):
        """Subclasses return a (batch_size, dim) tensor; the base is abstract."""
        pass
    def _estimate_mean(self, num_samples=100000):
        # Monte-Carlo estimate of the distribution mean.
        draws = self.sample(num_samples).cpu().detach().numpy()
        self.mean = draws.mean(axis=0).astype(np.float32)
    def _estimate_cov(self, num_samples=100000):
        # Monte-Carlo estimate of the covariance; `var` is its trace.
        draws = self.sample(num_samples).cpu().detach().numpy()
        self.cov = np.cov(draws.T).astype(np.float32)
        self.var = np.trace(self.cov)
class StandardNormalSampler(Sampler):
    """Standard normal N(0, I) sampler in `dim` dimensions."""
    def __init__(
        self, dim=2, device='cuda',
        requires_grad=False
    ):
        super(StandardNormalSampler, self).__init__(device, requires_grad)
        self.dim = dim
        # Closed-form moments: zero mean, identity covariance.
        self.mean = np.zeros(dim, dtype=np.float32)
        self.cov = np.eye(dim, dtype=np.float32)
        self.var = dim  # trace of the identity covariance
    def sample(self, batch_size=10):
        """Draw a (batch_size, dim) batch of i.i.d. standard normals."""
        return torch.randn(
            batch_size, self.dim,
            device=self.device,
            requires_grad=self.requires_grad
        )
class NormalSampler(Sampler):
    """Gaussian N(mean, cov) sampler.

    The covariance may be given directly (`cov`), via a factor `weight`
    (cov = W W^T), or defaults to identity. Sampling is the affine map
    Z W^T + mean of standard normals.
    """
    def __init__(
        self, mean, cov=None, weight=None, device='cuda',
        requires_grad=False
    ):
        super(NormalSampler, self).__init__(device=device, requires_grad=requires_grad)
        self.mean = np.array(mean, dtype=np.float32)
        self.dim = self.mean.shape[0]
        if weight is not None:
            weight = np.array(weight, dtype=np.float32)
        # Resolve covariance: explicit cov wins, then W W^T, then identity.
        if cov is not None:
            self.cov = np.array(cov, dtype=np.float32)
        elif weight is not None:
            self.cov = weight @ weight.T
        else:
            self.cov = np.eye(self.dim, dtype=np.float32)
        if weight is None:
            # Use the symmetric matrix square root as the sampling transform.
            weight = symmetrize(sqrtm(self.cov))
        self.var = np.trace(self.cov)
        self.weight = torch.tensor(weight, device=self.device, dtype=torch.float32)
        self.bias = torch.tensor(self.mean, device=self.device, dtype=torch.float32)
    def sample(self, batch_size=4):
        """Draw a (batch_size, dim) batch as Z @ W^T + mean."""
        draws = torch.randn(batch_size, self.dim, device=self.device)
        with torch.no_grad():
            draws = draws @ self.weight.T
            if self.bias is not None:
                draws += self.bias
        draws.requires_grad_(self.requires_grad)
        return draws
class CubeUniformSampler(Sampler):
    """Uniform distribution on a `dim`-cube, optionally centered at the
    origin and/or rescaled so the total variance equals `dim`."""
    def __init__(
        self, dim=1, centered=False, normalized=False, device='cuda',
        requires_grad=False
    ):
        super(CubeUniformSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = dim
        self.centered = centered
        self.normalized = normalized
        # U[0,1]^dim has total variance dim/12; "normalized" rescales to dim.
        self.var = dim if normalized else dim / 12
        self.cov = np.eye(dim, dtype=np.float32) if normalized else np.eye(dim, dtype=np.float32) / 12
        self.mean = np.zeros(dim, dtype=np.float32) if centered else .5 * np.ones(dim, dtype=np.float32)
        self.bias = torch.tensor(self.mean, device=self.device)
    def sample(self, batch_size=10):
        """Uniform draws, scaled to the configured variance and shifted by the mean."""
        base = torch.rand(
            batch_size, self.dim, device=self.device,
            requires_grad=self.requires_grad
        )
        return np.sqrt(self.var) * (base - .5) / np.sqrt(self.dim / 12) + self.bias
class BoxUniformSampler(Sampler):
    # A uniform box with axes components and the range on each
    # axis i is [a_min[i], a_max[i]].
    def __init__(
        self, components, a_min, a_max, estimate_size=100000,
        device='cuda', requires_grad=False
    ):
        """components: (k, dim) basis matrix; a_min/a_max: per-axis coordinate ranges."""
        super(BoxUniformSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = components.shape[1]
        self.components = torch.from_numpy(components).float().to(device=device)
        self.a_min = torch.from_numpy(a_min).float().to(device=device)
        self.a_max = torch.from_numpy(a_max).float().to(device=device)
        # Mean/covariance are estimated empirically by the Sampler base class.
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size):
        """Draw U[0,1) points, rescale each axis to [a_min, a_max], map through components."""
        with torch.no_grad():
            batch = torch.rand(
                batch_size, self.dim,
                device=self.device
            )
            batch = (torch.unsqueeze(self.a_min, 0) +
                     batch * torch.unsqueeze(self.a_max - self.a_min, 0))
            batch = torch.matmul(batch, self.components)
        # NOTE(review): torch.tensor on an existing tensor copies it; kept as-is
        # for parity with the sibling samplers, but clone().detach() is the
        # recommended spelling.
        return torch.tensor(
            batch, device=self.device,
            requires_grad=self.requires_grad
        )
class EmpiricalSampler(Sampler):
    """Empirical distribution over the rows of a fixed (N, D) numpy array.

    `sample` draws without replacement while batch_size <= N and tops up the
    batch with replacement otherwise.
    """
    def __init__(
        self, data, estimate_size=100000,
        device='cuda', requires_grad=False
    ):
        super(EmpiricalSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        # data is a np array NxD
        self.dim = data.shape[1]
        self.num_points = data.shape[0]
        self.data = torch.from_numpy(data).float().to(device=device)
        # Mean/covariance are estimated empirically by the Sampler base class.
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size):
        """Return a (batch_size, dim) batch of dataset rows."""
        inds = torch.randperm(self.num_points)
        if batch_size <= self.num_points:
            inds = inds[:batch_size]
        else:
            # BUG FIX: torch.randint requires a tuple for `size`; the original
            # passed the bare int `(batch_size - self.num_points)`, which raised
            # TypeError whenever batch_size exceeded the dataset size.
            additional_inds = torch.randint(
                0, self.num_points, (batch_size - self.num_points,)
            )
            inds = torch.cat([inds, additional_inds], dim=0)
        inds_repeated = torch.unsqueeze(inds, 1).repeat(1, self.dim)
        batch = torch.gather(self.data, 0, inds_repeated.to(device=self.device))
        # NOTE(review): torch.tensor on a tensor copies it; kept for parity with
        # the other samplers in this file.
        return torch.tensor(
            batch, device=self.device,
            requires_grad=self.requires_grad
        )
class TensorDatasetSampler(Sampler):
    """Samples (with replacement) from a tensorizable dataset kept on `storage`,
    optionally applying `transform` to each batch after moving it to `device`."""
    def __init__(
        self, dataset, transform=None, storage='cpu', storage_dtype=torch.float,
        device='cuda', requires_grad=False, estimate_size=100000,
    ):
        super(TensorDatasetSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.storage = storage
        # Identity transform by default.
        if transform is not None:
            self.transform = transform
        else:
            self.transform = lambda t: t
        self.storage_dtype = storage_dtype
        # Dataset stays on `storage` (e.g. CPU RAM); batches are moved on demand.
        self.dataset = torch.tensor(
            dataset, device=storage, dtype=storage_dtype, requires_grad=False
        )
        # Dimensionality is read off a probe batch, after the transform.
        self.dim = self.sample(1).shape[1]
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Sample `batch_size` transformed rows; batch_size=0 returns the full dataset."""
        if batch_size:
            ind = random.choices(range(len(self.dataset)), k=batch_size)
        else:
            ind = range(len(self.dataset))
        with torch.no_grad():
            batch = self.transform(torch.tensor(
                self.dataset[ind], device=self.device,
                dtype=torch.float32, requires_grad=False
            ))
        if self.requires_grad:
            batch.requires_grad_(True)
        return batch
class BallCrustUniformSampler(Sampler):
    """Uniform distribution on the spherical shell ("crust") r_min <= |x| <= r_max."""
    def __init__(
        self, dim=2, r_min=0.8, r_max=1.2, estimate_size=100000,
        device='cuda', requires_grad=False
    ):
        super(BallCrustUniformSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = dim
        assert r_min >= 0
        assert r_min < r_max
        self.r_min, self.r_max = r_min, r_max
        # Mean/covariance are estimated empirically by the Sampler base class.
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Uniform direction (normalized Gaussian) times a radius with density ~ r^(dim-1)."""
        with torch.no_grad():
            batch = torch.randn(
                batch_size, self.dim,
                device=self.device
            )
            # Project onto the unit sphere for a uniform direction.
            batch /= torch.norm(batch, dim=1)[:, None]
            # ratio = (r_min / r_max) ** dim; inverse-CDF sampling of the radius
            # restricted to [r_min, r_max] (uniform in volume).
            ratio = (1 - (self.r_max - self.r_min) / self.r_max) ** self.dim
            r = (torch.rand(
                batch_size, device=self.device
            ) * (1 - ratio) + ratio) ** (1. / self.dim)
        return torch.tensor(
            (batch.transpose(0, 1) * r * self.r_max).transpose(0, 1),
            device=self.device,
            requires_grad=self.requires_grad
        )
class MixN2GaussiansSampler(Sampler):
    """Mixture of n*n isotropic Gaussians centered on a regular 2D grid.

    Grid nodes lie at step * {-(n-1)/2, ..., (n-1)/2}^2 and each component
    has standard deviation `std`.
    """
    def __init__(self, n=5, std=1, step=9, device='cuda', estimate_size=100000,
        requires_grad=False
    ):
        super(MixN2GaussiansSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = 2
        self.std, self.step = std, step
        self.n = n
        # Symmetric 1D grid, replicated along both axes.
        axis = np.linspace(-(n - 1) / 2., (n - 1) / 2., n)
        gx, gy = np.meshgrid(axis, axis)
        self.centers = torch.tensor(
            np.stack([gx, gy]).reshape(2, -1).T,
            device=self.device,
            dtype=torch.float32
        )
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Pick a center uniformly per sample and add isotropic Gaussian noise."""
        noise = torch.randn(
            batch_size, self.dim,
            device=self.device
        )
        picks = random.choices(range(len(self.centers)), k=batch_size)
        batch = noise * self.std + self.step * self.centers[picks, :]
        return torch.tensor(
            batch, device=self.device,
            requires_grad=self.requires_grad
        )
class CubeCrustUniformSampler(Sampler):
    """Uniform distribution on a cubic shell: the region between two nested,
    centered cubes of half-sides r_min and r_max (in the sup norm)."""
    def __init__(
        self, dim=2, r_min=0.8, r_max=1.2, estimate_size=100000, device='cuda',
        requires_grad=False
    ):
        super(CubeCrustUniformSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = dim
        assert r_min >= 0
        assert r_min < r_max
        self.r_min, self.r_max = r_min, r_max
        # Mean/covariance are estimated empirically by the Sampler base class.
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Sample on the unit cube surface, then scale by a volume-uniform radius."""
        with torch.no_grad():
            batch = 2 * torch.rand(
                batch_size, self.dim,
                device=self.device
            ) - 1
            # Force one random coordinate to +-1 so the point lies on the
            # surface of the unit cube.
            axes = torch.randint(0, self.dim, size=(batch_size, 1), device=self.device)
            batch.scatter_(
                1, axes,
                2 * ((batch.gather(1, axes) > 0)).type(torch.float32) - 1
            )
            # ratio = (r_min / r_max) ** dim; inverse-CDF radius in [r_min, r_max],
            # uniform with respect to volume.
            ratio = (1 - (self.r_max - self.r_min) / self.r_max) ** self.dim
            r = (torch.rand(
                batch_size, device=self.device
            ) * (1 - ratio) + ratio) ** (1. / self.dim)
        return torch.tensor(
            (batch.transpose(0, 1) * self.r_max * r).transpose(0, 1),
            device=self.device,
            requires_grad=self.requires_grad
        )
class SwissRollSampler(Sampler):
    """2D 'swiss roll' distribution: sklearn's swiss roll projected onto the
    (x, z) plane and rescaled."""
    def __init__(
        self, estimate_size=100000, device='cuda', requires_grad=False
    ):
        super(SwissRollSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = 2
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Generate `batch_size` planar swiss-roll points."""
        roll = sklearn.datasets.make_swiss_roll(
            n_samples=batch_size,
            noise=0.8
        )[0].astype(np.float32)
        # Keep only the (x, z) coordinates and shrink to a convenient range.
        planar = roll[:, [0, 2]] / 7.5
        return torch.tensor(
            planar, device=self.device,
            requires_grad=self.requires_grad
        )
class Mix8GaussiansSampler(Sampler):
    """Mixture of 8 Gaussians on a circle of radius `r` (plus an optional
    ninth component at the origin when `with_central` is set)."""
    def __init__(
        self, with_central=False, std=1, r=12,
        estimate_size=100000,
        device='cuda', requires_grad=False
    ):
        super(Mix8GaussiansSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = 2
        self.std, self.r = std, r
        self.with_central = with_central
        # Unit vectors at multiples of 45 degrees.
        centers = [
            (1, 0), (-1, 0), (0, 1), (0, -1),
            (1. / np.sqrt(2), 1. / np.sqrt(2)),
            (1. / np.sqrt(2), -1. / np.sqrt(2)),
            (-1. / np.sqrt(2), 1. / np.sqrt(2)),
            (-1. / np.sqrt(2), -1. / np.sqrt(2))
        ]
        if with_central:
            centers.append((0, 0))
        self.centers = torch.tensor(centers, device=self.device)
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Pick a center uniformly per sample and add isotropic Gaussian noise."""
        with torch.no_grad():
            noise = torch.randn(
                batch_size, self.dim,
                device=self.device
            )
            picks = random.choices(range(len(self.centers)), k=batch_size)
            batch = noise * self.std + self.r * self.centers[picks, :]
        if self.requires_grad:
            batch.requires_grad_(True)
        return batch
class Transformer(object):
    """Base class for samplers derived by transforming another sampler.

    Holds the torch device and whether sampled batches should track gradients.
    """
    def __init__(
        self, device='cuda',
        requires_grad=False
    ):
        self.device = device
        self.requires_grad = requires_grad
class LinearTransformer(Transformer):
    """Affine transformation x |-> weight @ x + bias of a base sampler.

    Must be fitted (via `fit` or the `base_sampler` constructor argument)
    before `sample` is called.
    """
    def __init__(
        self, weight, bias=None, base_sampler=None,
        device='cuda',
        requires_grad=False
    ):
        super(LinearTransformer, self).__init__(
            device=device,
            requires_grad=requires_grad
        )
        self.fitted = False
        self.dim = weight.shape[0]
        self.weight = torch.tensor(weight, device=device, dtype=torch.float32, requires_grad=False)
        if bias is not None:
            self.bias = torch.tensor(bias, device=device, dtype=torch.float32, requires_grad=False)
        else:
            # Zero bias so `sample` can add it unconditionally.
            self.bias = torch.zeros(self.dim, device=device, dtype=torch.float32, requires_grad=False)
        if base_sampler is not None:
            self.fit(base_sampler)
    def fit(self, base_sampler):
        """Attach a base sampler and derive the pushed-forward moments analytically."""
        self.base_sampler = base_sampler
        weight, bias = self.weight.cpu().numpy(), self.bias.cpu().numpy()
        # Affine pushforward of mean and covariance.
        self.mean = weight @ self.base_sampler.mean + bias
        self.cov = weight @ self.base_sampler.cov @ weight.T
        self.var = np.trace(self.cov)
        self.fitted = True
        return self
    def sample(self, batch_size=4):
        """Sample from the base sampler and apply the affine map."""
        assert self.fitted == True
        # NOTE(review): torch.tensor on a tensor copies it; clone().detach()
        # is the recommended spelling.
        batch = torch.tensor(
            self.base_sampler.sample(batch_size),
            device=self.device, requires_grad=False
        )
        with torch.no_grad():
            batch = batch @ self.weight.T
            # bias is always set in __init__; this guard is defensive.
            if self.bias is not None:
                batch += self.bias
        batch = batch.detach()
        batch.requires_grad_(self.requires_grad)
        return batch
class StandardNormalScaler(Transformer):
    """Whitening transform: shifts by the base sampler's mean and multiplies by
    cov^(-1/2), so the output has zero mean and (approximately) identity covariance."""
    def __init__(
        self, base_sampler=None, device='cuda', requires_grad=False
    ):
        super(StandardNormalScaler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        if base_sampler is not None:
            self.fit(base_sampler)
    def fit(self, base_sampler, batch_size=1000):
        """Derive the whitening weight/bias from the base sampler's moments."""
        self.base_sampler = base_sampler
        self.dim = self.base_sampler.dim
        self.bias = torch.tensor(
            self.base_sampler.mean, device=self.device, dtype=torch.float32
        )
        # Inverse symmetric square root of the base covariance.
        weight = symmetrize(np.linalg.inv(sqrtm(self.base_sampler.cov)))
        self.weight = torch.tensor(weight, device=self.device, dtype=torch.float32)
        self.mean = np.zeros(self.dim, dtype=np.float32)
        # Exactly identity up to numerical error in sqrtm/inv.
        self.cov = weight @ self.base_sampler.cov @ weight.T
        self.var = np.trace(self.cov)
        return self
    def sample(self, batch_size=10):
        """Sample from the base sampler, then center and whiten in-place."""
        # NOTE(review): torch.tensor on a tensor copies it; clone().detach()
        # is the recommended spelling.
        batch = torch.tensor(
            self.base_sampler.sample(batch_size),
            device=self.device, requires_grad=False
        )
        with torch.no_grad():
            batch -= self.bias
            # weight is symmetric, so right-multiplication equals weight.T.
            batch @= self.weight
        if self.requires_grad:
            batch.requires_grad_(True)
        return batch
| 16,356
| 33.006237
| 119
|
py
|
Wasserstein2Barycenters
|
Wasserstein2Barycenters-main/src/benchmarks.py
|
import torch
import torch.nn as nn
import numpy as np
from scipy.stats import ortho_group
from scipy.linalg import sqrtm
from .tools import calculate_frechet_distance
from tqdm import tqdm_notebook as tqdm
from . import distributions
def symmetrize(X):
    """Return the symmetric part (X + X^T) / 2, discarding any imaginary residue."""
    half_sum = (X + X.T) / 2
    return np.real(half_sum)
def get_barycenter_cov(covs, alphas, max_iter=1000, tol=1e-8, verbose=True):
    # Iterative computation of barycenter's covariance
    # matrix via fixed-point approach
    #
    # covs: list of covariance matrices; alphas: barycenter weights.
    # Iterates S <- S^(-1/2) (sum_k a_k (S^(1/2) C_k S^(1/2))^(1/2))^2 S^(-1/2)
    # starting from the identity, until the max entrywise change drops below tol.
    bar_cov = np.eye(covs[0].shape[0], dtype=np.float32)
    for iteration in tqdm(range(max_iter)) if verbose else range(max_iter):
        bar_cov_old = bar_cov
        root_bar_cov = symmetrize(sqrtm(bar_cov))
        inv_root_bar_cov = symmetrize(np.linalg.inv(root_bar_cov))
        # To remove cycle Batch sqrtm required (does it exist?)
        inner_sum = 0.
        for k in range(len(alphas)):
            inner_sum += alphas[k] * symmetrize(sqrtm(root_bar_cov @ covs[k] @ root_bar_cov))
        inner_sum = symmetrize(inner_sum)
        inner_sum = inner_sum @ inner_sum
        bar_cov = symmetrize(inv_root_bar_cov @ inner_sum @ inv_root_bar_cov)
        # Stop once the fixed point has converged entrywise.
        if np.max(np.abs((bar_cov - bar_cov_old))) < tol:
            break
    return bar_cov
def get_linear_transport(mean1, cov1, mean2, cov2):
    """Return (weight, bias) of the affine optimal-transport map between
    Gaussians: x |-> weight @ x + bias sends N(mean1, cov1) to N(mean2, cov2)."""
    root1 = symmetrize(sqrtm(cov1))
    inv_root1 = symmetrize(np.linalg.inv(root1))
    middle = symmetrize(sqrtm(root1 @ cov2 @ root1))
    weight = inv_root1 @ middle @ inv_root1
    bias = mean2 - weight @ mean1
    return weight, bias
class Benchmark:
    """Marker base class for Wasserstein-2 barycenter benchmarks."""
class Wasserstein2BarycenterBenchmark(Benchmark):
    """Benchmark for Wasserstein-2 barycenters of a list of samplers.

    Optionally computes the Gaussian (Bures) barycenter of the samplers'
    covariances together with the affine transport maps to and from it.
    """
    def __init__(
        self, samplers, bar_sampler=None, alphas=None,
        compute_gaussian=True, max_iter=1000, tol=1e-6,
        device='cuda', requires_grad=False, verbose=False
    ):
        super(Wasserstein2BarycenterBenchmark, self).__init__()
        self.verbose = verbose
        self.dim = samplers[0].dim
        self.num = len(samplers)
        # Default to uniform barycenter weights.
        if alphas is not None:
            self.alphas = alphas
        else:
            self.alphas = np.ones(self.num, dtype=np.float32) / self.num
        self.device = device
        self.requires_grad = requires_grad
        self.samplers = samplers
        # Ground-truth barycenter data; filled in by subclasses when available.
        self.gauss_bar_sampler = None
        self.bar_sampler = bar_sampler
        self.bar_maps = None
        self.bar_maps_inv = None
        self.bar_cost = None
        if compute_gaussian:
            self._compute_gaussian_barycenter(max_iter=max_iter, tol=tol)
    def _compute_gaussian_barycenter(self, max_iter=1000, tol=1e-6):
        """Compute the Gaussian barycenter, its cost, and affine maps to/from it."""
        if self.verbose:
            print(f'Computing Gaussian Barycenter Covariance, max_iter={max_iter}')
        gauss_bar_cov = get_barycenter_cov(
            [sampler.cov for sampler in self.samplers], self.alphas,
            max_iter, tol, verbose=self.verbose
        )
        # Zero-mean Gaussian with the fixed-point barycenter covariance.
        self.gauss_bar_sampler = distributions.NormalSampler(
            np.zeros(self.dim, dtype=np.float32), cov=gauss_bar_cov,
            device=self.device, requires_grad=self.requires_grad
        )
        if self.verbose:
            print('Computing the Gaussian Barycenter Functional')
        # Weighted sum of squared Bures-Wasserstein distances to the barycenter.
        self.gauss_bar_cost = np.sum([self.alphas[n] * calculate_frechet_distance(
            self.samplers[n].mean, self.samplers[n].cov,
            self.gauss_bar_sampler.mean, self.gauss_bar_sampler.cov,
        ) for n in range(self.num)])
        # Affine transport maps (as nn.Linear) between each sampler and the barycenter.
        self.gauss_bar_maps_inv, self.gauss_bar_maps = [], []
        for n in tqdm(range(self.num)) if self.verbose else range(self.num):
            # Barycenter -> sampler n (inverse map).
            weight_inv, bias_inv = get_linear_transport(
                self.gauss_bar_sampler.mean, self.gauss_bar_sampler.cov,
                self.samplers[n].mean, self.samplers[n].cov,
            )
            map_inv = nn.Linear(self.dim, self.dim).to(self.device)
            map_inv.weight.data = torch.tensor(weight_inv, device=self.device)
            map_inv.bias.data = torch.tensor(bias_inv, device=self.device)
            self.gauss_bar_maps_inv.append(map_inv)
            # Sampler n -> barycenter (forward map).
            weight, bias = get_linear_transport(
                self.samplers[n].mean, self.samplers[n].cov,
                self.gauss_bar_sampler.mean, self.gauss_bar_sampler.cov,
            )
            map_fwd = nn.Linear(self.dim, self.dim).to(self.device)
            map_fwd.weight.data = torch.tensor(weight, device=self.device)
            map_fwd.bias.data = torch.tensor(bias, device=self.device)
            self.gauss_bar_maps.append(map_fwd)
class LocationScatterBenchmark(Wasserstein2BarycenterBenchmark):
    """Benchmark on a location-scatter family: each distribution is an affine
    image of one template `sampler`, with prescribed means and covariances.

    For such families the Gaussian barycenter maps are exact, so the true
    barycenter data (`bar_*`) can be filled from the Gaussian computation.
    """
    def __init__(
        self, sampler, means, covs, alphas=None,
        compute_barycenter=True, max_iter=1000, tol=1e-6,
        device='cuda', requires_grad=False, verbose=False
    ):
        samplers = []
        for mean, cov in zip(means, covs):
            # Affine map sending the template's moments to (mean, cov).
            weight, bias = get_linear_transport(sampler.mean, sampler.cov, mean, cov)
            # NOTE(review): no device= is passed here, so these transformers use
            # the default 'cuda' even when a different `device` argument is
            # given — confirm whether that is intended.
            samplers.append(
                distributions.LinearTransformer(
                    weight, bias, requires_grad=requires_grad
                ).fit(sampler)
            )
        super(LocationScatterBenchmark, self).__init__(
            samplers, alphas=alphas,
            compute_gaussian=compute_barycenter, max_iter=max_iter, tol=tol,
            device=device, requires_grad=requires_grad, verbose=verbose
        )
        if compute_barycenter:
            # In the location-scatter case the Gaussian barycenter is the true one.
            self.bar_cost = self.gauss_bar_cost
            self.bar_maps = self.gauss_bar_maps
            self.bar_maps_inv = self.gauss_bar_maps_inv
            weight, bias = get_linear_transport(
                sampler.mean, sampler.cov,
                self.gauss_bar_sampler.mean, self.gauss_bar_sampler.cov
            )
            self.bar_sampler = distributions.LinearTransformer(
                weight, bias,
                requires_grad=self.requires_grad,
                device=self.device
            ).fit(sampler)
class EigenWarpBenchmark(LocationScatterBenchmark):
    """Location-scatter benchmark with random rotations and a fixed eigenvalue
    profile: each covariance is R D^2 R^T with D's entries geometric between
    min_eig and max_eig (so cov eigenvalues span [min_eig^2, max_eig^2]).

    Means are i.i.d. normal scaled by `shift` (zero means when shift=0).
    """
    def __init__(
        self, sampler, num=3, min_eig=0.5, max_eig=2., shift=0., alphas=None,
        compute_barycenter=True, max_iter=1000, tol=1e-6,
        device='cuda', requires_grad=False, verbose=False
    ):
        self.num = num
        self.dim = sampler.dim
        self.min_eig, self.max_eig = min_eig, max_eig
        self.shift = shift
        self.verbose = verbose
        means = self.shift * np.random.normal(size=(self.num, self.dim)).astype(np.float32)
        covs = np.zeros((self.num, self.dim, self.dim), dtype=np.float32)
        if self.verbose:
            print('Generating Covariance Matrices')
        for n in range(self.num):
            # Random orthogonal rotation of a fixed log-uniform spectrum.
            rotation = ortho_group.rvs(self.dim)
            weight = rotation @ np.diag(np.exp(np.linspace(np.log(min_eig), np.log(max_eig), self.dim)))
            covs[n] = weight @ weight.T
        super(EigenWarpBenchmark, self).__init__(
            sampler, means, covs, alphas=alphas,
            compute_barycenter=compute_barycenter, max_iter=max_iter, tol=tol,
            device=device, requires_grad=requires_grad, verbose=verbose
        )
# class RotatedGaussiansBenchmark(Wasserstein2BarycenterBenchmark):
# def __init__(
# self,
# dim=2, count=2, alphas=None,
# eig=(0.5, 2.), shift=3.,
# max_iter=1000, tol=1e-6,
# verbose=False,
# device='cuda',
# dtype=torch.float32,
# requires_grad=False
# ):
# super(RotatedGaussiansBenchmark, self).__init__(
# dim, count, alphas,
# device=device,
# requires_grad=requires_grad
# )
# self.eig = eig
# self.shift = shift
# self.verbose = verbose
# means = self.shift * np.random.normal(size=(self.count, self.dim)).astype(np.float32)
# means -= (means.T * self.alphas).sum(axis=1)
# transforms = np.zeros((self.count, self.dim, self.dim), dtype=np.float32)
# if self.verbose:
# print('Generating Covariance Matrices')
# for k in range(self.count):
# rotation = ortho_group.rvs(self.dim)
# transforms[k] = rotation @ np.diag(np.exp(np.linspace(np.log(eig[0]), np.log(eig[1]), self.dim)))
# if self.verbose:
# print('Initializing samplers')
# self.samplers = [
# distributions.NormalSampler(
# means[k], weight=transforms[k],
# device=self.device, requires_grad=self.requires_grad
# ) for k in range(count)
# ]
# self._compute_barycenter(max_iter, tol)
# def _compute_barycenter(self, max_iter=1000, tol=1e-6):
# if self.verbose:
# print(f'Computing Barycenter Covariance, max_iter={max_iter}')
# bar_cov = get_barycenter_cov(
# [sampler.cov for sampler in self.samplers], self.alphas,
# max_iter, tol, verbose=self.verbose
# )
# self.bar_sampler = distributions.NormalSampler(
# np.zeros(self.dim, dtype=np.float32), cov=bar_cov,
# device=self.device, requires_grad=self.requires_grad
# )
# if self.verbose:
# print('Computing inverse and forward maps to barycenter')
# self.bar_maps_inv, self.bar_maps = [], []
# self.bar_cost = 0.
# for k in tqdm(range(self.count)) if self.verbose else range(self.count):
# weight, bias, weight_inv, bias_inv = get_linear_transport(
# self.samplers[k].mean, self.samplers[k].cov,
# self.bar_sampler.mean, self.bar_sampler.cov,
# )
# map_inv = nn.Linear(self.dim, self.dim).to(self.device)
# map_inv.weight.data = torch.tensor(weight_inv, device=self.device)
# map_inv.bias.data = torch.tensor(bias_inv, device=self.device)
# self.bar_maps_inv.append(map_inv)
# map_fwd = nn.Linear(self.dim, self.dim).to(self.device)
# map_fwd.weight.data = torch.tensor(weight, device=self.device)
# map_fwd.bias.data = torch.tensor(bias, device=self.device)
# self.bar_maps.append(map_fwd)
# self.bar_cost += self.alphas[k] * calculate_frechet_distance(
# self.samplers[k].mean, self.samplers[k].cov,
# self.bar_sampler.mean, self.bar_sampler.cov,
# )
| 10,558
| 40.735178
| 111
|
py
|
Wasserstein2Barycenters
|
Wasserstein2Barycenters-main/src/plotters.py
|
import numpy as np
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import torch
import gc
def plot_rgb_cloud(cloud, ax):
    """Scatter an (N, 3) point cloud in RGB space on a 3D axis, coloring each
    point by its own (clipped) coordinates."""
    rgb = np.clip(cloud, 0, 1)
    ax.scatter(cloud[:, 0], cloud[:, 1], cloud[:, 2], c=rgb)
    ax.set_xlabel('Red')
    ax.set_ylabel('Green')
    ax.set_zlabel('Blue')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_zlim(0, 1)
def plot_training_phase(
    benchmark, pca, D_list, D_conj_list,
    G=None, Z_sampler=None,
    plot_batchsize=250, partsize=(5, 5), dpi=200
):
    """Plot a 3 x (num+2) grid of PCA projections summarizing training state.

    Row 0: the input distributions; row 1: their forward pushes (and the
    generated barycenter if a generator G is given); row 2: inverse pushes.
    The last two columns hold the generated barycenter / generator target and
    the true (or Gaussian) barycenter. Returns (fig, axes).
    """
    plot_G = True if ((G is not None) and (Z_sampler is not None)) else False
    plot_B = True if (benchmark.bar_sampler is not None) or (benchmark.gauss_bar_sampler is not None) else False
    fig, axes = plt.subplots(
        3, benchmark.num + 2,
        figsize=(partsize[0] * (benchmark.num+2), 3 * partsize[1]),
        sharex=True, sharey=True, dpi=dpi
    )
    # Original distributions, pushed and inverse pushed from G(Z)
    if plot_G:
        Z = Z_sampler.sample(plot_batchsize).detach()
        Y = G(Z).detach()
        # Re-enable grad so inverse potentials can be pushed through Y below.
        Y.requires_grad_(True)
        Y_pca = pca.transform(Y.cpu().detach().numpy())
        axes[1,-2].scatter(Y_pca[:, 0], Y_pca[:, 1], edgecolors='black', color='gold')
        axes[1,-2].set_title(f'Generated Barycenter', fontsize=12)
        Y_push_sum = 0.
    for n in range(benchmark.num):
        X = benchmark.samplers[n].sample(plot_batchsize)
        X_pca = pca.transform(X.cpu().detach().numpy())
        X_push_pca = pca.transform(D_list[n].push(X).cpu().detach().numpy())
        axes[0, n].scatter(X_pca[:, 0], X_pca[:, 1], edgecolors='black')
        axes[0, n].set_title(f'Initial distribution {n}', fontsize=12)
        axes[1, n].scatter(X_push_pca[:, 0], X_push_pca[:, 1], edgecolors='black', color='orange')
        axes[1, n].set_title(f'Pushed distribution {n}', fontsize=12)
        if plot_G:
            # Inverse-push the generated barycenter and accumulate the
            # alpha-weighted sum (the generator's regression target).
            Y_push = D_conj_list[n].push(Y).detach()
            Y_push_pca = pca.transform(Y_push.cpu().detach().numpy())
            with torch.no_grad():
                Y_push_sum += benchmark.alphas[n] * Y_push
            axes[2, n].set_title(f'Inverse pushed {n} from generated', fontsize=12)
        else:
            # No generator: check the cycle X -> push -> inverse push instead.
            Y = D_list[n].push(X).detach()
            Y.requires_grad_(True)
            Y_push_pca = pca.transform(D_conj_list[n].push(Y).cpu().detach().numpy())
            axes[2, n].set_title(f'Inverse pushed {n}', fontsize=12)
        axes[2, n].scatter(Y_push_pca[:, 0], Y_push_pca[:, 1], edgecolors='black', color='lightblue')
    if plot_G:
        Y_push_sum_pca = pca.transform(Y_push_sum.cpu().detach().numpy())
        axes[2, -2].scatter(Y_push_sum_pca[:, 0], Y_push_sum_pca[:, 1], edgecolors='black', color='red')
        axes[2, -2].set_title(f'Generator Target', fontsize=12)
    if plot_B:
        # Prefer the true barycenter sampler; fall back to the Gaussian one.
        if benchmark.bar_sampler is not None:
            axes[1, -1].set_title(f'True Barycenter', fontsize=12)
            axes[2, -1].set_title(f'True Barycenter', fontsize=12)
            Y = benchmark.bar_sampler.sample(plot_batchsize).cpu().detach().numpy()
        else:
            axes[1, -1].set_title(f'Gaussian Barycenter', fontsize=12)
            axes[2, -1].set_title(f'Gaussian Barycenter', fontsize=12)
            Y = benchmark.gauss_bar_sampler.sample(plot_batchsize).cpu().detach().numpy()
        Y_pca = pca.transform(Y)
        axes[1, -1].scatter(Y_pca[:, 0], Y_pca[:, 1], edgecolors='black', color='green')
        axes[2, -1].scatter(Y_pca[:, 0], Y_pca[:, 1], edgecolors='black', color='green')
    gc.collect()
    torch.cuda.empty_cache()
    return fig, axes
def plot_colored_cloud(cloud, ax):
    """Scatter a 3D point cloud, coloring points by their min-max normalized
    coordinates; hides the 3D axis frame."""
    ax._axis3don = False
    lo = cloud.min(axis=0)
    colors = (cloud - lo) / (cloud.max(axis=0) - lo)
    ax.scatter(cloud[:, 0], cloud[:, 1], cloud[:, 2], c=colors)
    ax.set_xlabel('Red')
    ax.set_ylabel('Green')
    ax.set_zlabel('Blue')
def push_img(im, D):
    """Push every pixel of an HxWx3 image through potential D's map, returning
    an int image of the same shape. NOTE(review): device is hard-coded to 'cuda'."""
    # Flatten to (H*W, 3) rows of RGB values scaled to [0, 1].
    X = (np.asarray(im).transpose(2, 0, 1).reshape(3, -1) / 255.).T
    X_pushed = np.zeros_like(X)
    # Process in chunks — presumably to bound GPU memory; TODO confirm.
    pos = 0; batch = 4999
    while pos < len(X):
        X_pushed[pos:pos+batch] = D.push(
            torch.tensor(X[pos:pos+batch], device='cuda', requires_grad=True).float()
        ).detach().cpu().numpy()
        pos += batch
    # Reassemble into HxWx3, clip back to [0, 1] and rescale to 0..255.
    im_pushed = (
        np.clip(
            (X_pushed.T.reshape(
                np.asarray(im).transpose(2, 0, 1).shape
            )).transpose(1, 2, 0), 0, 1) * 255
    ).astype(int)
    return im_pushed
def plot_training_phase_palettes(
    benchmark, D_list, D_conj_list,
    plot_batchsize=250, partsize=(5, 5), dpi=200,
    elev=0., azim=40
):
    """Plot RGB palette clouds in a 3 x num grid: originals (row 0), forward
    pushes (row 1) and cycle reconstructions (row 2). Returns (fig, axes).

    NOTE(review): `elev`/`azim` are accepted but never applied to the axes —
    confirm whether a view_init call was intended.
    """
    fig, axes = plt.subplots(
        3, benchmark.num,
        figsize=(partsize[0] * (benchmark.num), 3 * partsize[1]),
        sharex=True, sharey=True, dpi=dpi,
        subplot_kw=dict(projection='3d')
    )
    for n in range(benchmark.num):
        X = benchmark.samplers[n].sample(plot_batchsize)
        X_np = np.clip(X.cpu().detach().numpy(), 0, 1)
        X_push_np = np.clip(D_list[n].push(X).cpu().detach().numpy(), 0, 1)
        plot_rgb_cloud(X_np, axes[0, n])
        axes[0, n].set_title(f'Initial distribution {n}', fontsize=12)
        plot_rgb_cloud(X_push_np, axes[1, n])
        axes[1, n].set_title(f'Pushed distribution {n}', fontsize=12)
        # Cycle: push forward, then push the detached result back through the
        # conjugate potential.
        Y = D_list[n].push(X).detach()
        Y.requires_grad_(True)
        Y_push_np = np.clip(D_conj_list[n].push(Y).cpu().detach().numpy(), 0, 1)
        axes[2, n].set_title(f'Inverse pushed {n}', fontsize=12)
        plot_rgb_cloud(Y_push_np, axes[2, n])
    gc.collect()
    torch.cuda.empty_cache()
    return fig, axes
def plot_training_phase_im(
    imgs, D_list, D_conj_list,
    plot_batchsize=250, partsize=(5, 5), dpi=200,
    elev=0., azim=40
):
    """Plot, per image: the original (row 0), its forward push (row 1), and the
    inverse push of the pushed image, i.e. the cycle reconstruction (row 2).
    Returns (fig, axes)."""
    n_cols = len(imgs)
    fig, axes = plt.subplots(
        3, n_cols,
        figsize=(partsize[0] * n_cols, 3 * partsize[1]),
        dpi=dpi
    )
    for col, img in enumerate(imgs):
        axes[0, col].imshow(img)
        axes[0, col].set_title(f'Initial distribution {col}', fontsize=12)
        pushed = push_img(img, D_list[col])
        axes[1, col].imshow(pushed)
        axes[1, col].set_title(f'Pushed distribution {col}', fontsize=12)
        recovered = push_img(pushed, D_conj_list[col])
        axes[2, col].imshow(recovered)
        axes[2, col].set_title(f'Inverse pushed {col}', fontsize=12)
    gc.collect()
    torch.cuda.empty_cache()
    return fig, axes
| 6,593
| 37.115607
| 112
|
py
|
Wasserstein2Barycenters
|
Wasserstein2Barycenters-main/src/tools.py
|
import os, sys
import torchvision.datasets as datasets
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.linalg import sqrtm
import os, sys
import argparse
import collections
from scipy.io import savemat
from tqdm import trange
from torchvision.utils import save_image
from torch.utils.data import DataLoader
import multiprocessing
import itertools
import torch
from PIL import Image
sys.path.append("..")
import gc
def ewma(x, span=200):
    """Exponentially weighted moving average of `x`, returned as a 1D numpy array."""
    frame = pd.DataFrame({'x': x})
    smoothed = frame.ewm(span=span).mean()
    return smoothed.values[:, 0]
def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw()
    # Get the RGBA buffer from the figure.
    # FIX: np.fromstring on binary data is deprecated (and removed in recent
    # numpy); np.frombuffer is the supported equivalent and avoids a copy.
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    # NOTE(review): (w, h, 4) matches the original code and fig2img below, but
    # row-major pixel buffers are conventionally (h, w, 4); for non-square
    # figures the image comes out transposed — confirm intended layout.
    buf = buf.reshape(w, h, 4)
    # canvas.tostring_argb gives ARGB; roll the alpha channel to get RGBA.
    buf = np.roll(buf, 3, axis=2)
    return buf
def fig2img ( fig ):
    # Convert a Matplotlib figure to a PIL RGBA Image via fig2data.
    # NOTE(review): buf.tostring() is the deprecated alias of tobytes(), and the
    # (w, h) unpacking mirrors fig2data's (w, h, 4) shape — for non-square
    # figures this swaps PIL's (width, height); confirm intended layout.
    buf = fig2data ( fig )
    w, h, d = buf.shape
    return Image.frombytes( "RGBA", ( w ,h ), buf.tostring( ) )
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.
    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
    d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.
    Params:
    -- mu1, mu2       : mean vectors of the two Gaussians.
    -- sigma1, sigma2 : covariance matrices of the two Gaussians.
    -- eps            : diagonal jitter used if sqrtm of the product is singular.
    Returns:
    -- : The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)
    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'
    diff = mu1 - mu2
    # Product might be almost singular
    covmean, _ = sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        jitter = np.eye(sigma1.shape[0]) * eps
        covmean = sqrtm((sigma1 + jitter).dot(sigma2 + jitter))
    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError('Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
def score_forward_maps(benchmark, D_list, score_size=1024):
    """L2-UVP (%) of each forward potential's push against the true barycenter maps.

    For each distribution n, pushes a sample through D_list[n] and compares it
    to the ground-truth affine map benchmark.bar_maps[n], normalized by the
    barycenter variance. Returns a list of per-distribution scores.
    """
    assert (benchmark.bar_maps is not None) and (benchmark.bar_sampler is not None)
    L2_UVP = []
    # FIX: the original drew an unused batch Y from bar_sampler here; it was
    # never referenced and wasted score_size samples (and RNG draws) per call.
    for n in range(benchmark.num):
        X = benchmark.samplers[n].sample(score_size)
        X_push = D_list[n].push(X).detach()
        with torch.no_grad():
            X_push_true = benchmark.bar_maps[n](X)
            L2_UVP.append(
                100 * (((X_push - X_push_true) ** 2).sum(dim=1).mean() / benchmark.bar_sampler.var).item()
            )
    return L2_UVP
def score_pushforwards(benchmark, D_list, score_size=128*1024, batch_size=1024):
    """BW2-UVP (%) of each pushforward: Bures-Wasserstein distance between the
    empirical moments of the pushed samples and the true barycenter's moments,
    normalized by the barycenter variance. Returns a list of scores."""
    assert (benchmark.bar_sampler is not None)
    BW2_UVP = []
    if score_size < batch_size:
        batch_size = score_size
    # NOTE(review): any remainder score_size % batch_size is silently dropped.
    num_chunks = score_size // batch_size
    for n in range(benchmark.num):
        # Push samples chunk by chunk and stack into one array.
        X_push = np.vstack([
            D_list[n].push(benchmark.samplers[n].sample(batch_size)).cpu().detach().numpy()
            for _ in range(num_chunks)
        ])
        X_push_cov = np.cov(X_push.T)
        X_push_mean = np.mean(X_push, axis=0)
        UVP = 100 * calculate_frechet_distance(
            X_push_mean, X_push_cov,
            benchmark.bar_sampler.mean, benchmark.bar_sampler.cov,
        ) / benchmark.bar_sampler.var
        BW2_UVP.append(UVP)
    return BW2_UVP
def score_cycle_consistency(benchmark, D_list, D_conj_list, score_size=1024):
    """Cycle L2-UVP (%): mean squared distance of X -> push -> inverse-push
    from the original X, normalized by each sampler's variance."""
    cycle_UVP = []
    for n in range(benchmark.num):
        X = benchmark.samplers[n].sample(score_size)
        X_push = D_list[n].push(X).detach()
        # Gradients must flow through X_push for the conjugate's push.
        X_push.requires_grad_(True)
        X_cycle = D_conj_list[n].push(X_push).detach()
        with torch.no_grad():
            residual = ((X - X_cycle) ** 2).sum(dim=1).mean()
            cycle_UVP.append(100 * (residual / benchmark.samplers[n].var).item())
    return cycle_UVP
def score_congruence(benchmark, D_conj_list, score_size=1024):
    """Congruence L2-UVP (%): deviation of the alpha-weighted sum of inverse
    pushes of a barycenter sample from the sample itself."""
    assert benchmark.bar_sampler is not None
    Y = benchmark.bar_sampler.sample(score_size)
    weighted_sum = torch.zeros_like(Y).detach()
    for n in range(benchmark.num):
        Y_push = D_conj_list[n].push(Y).detach()
        with torch.no_grad():
            weighted_sum += benchmark.alphas[n] * Y_push
    residual = ((Y - weighted_sum) ** 2).sum(dim=1).mean()
    return 100 * (residual / benchmark.bar_sampler.var).item()
| 5,715
| 34.725
| 106
|
py
|
Wasserstein2Barycenters
|
Wasserstein2Barycenters-main/src/layers.py
|
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
class ConvexQuadratic(nn.Module):
    '''Convex Quadratic Layer: y = sum_r (x @ A_r)^2 + W x + b, which is convex
    in the input x.'''
    __constants__ = ['in_features', 'out_features', 'quadratic_decomposed', 'weight', 'bias']
    def __init__(self, in_features, out_features, bias=True, rank=1):
        super(ConvexQuadratic, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.rank = rank
        # Low-rank factors A_r of the quadratic form, one per output feature.
        self.quadratic_decomposed = nn.Parameter(torch.Tensor(
            torch.randn(in_features, rank, out_features)
        ))
        self.weight = nn.Parameter(torch.Tensor(
            torch.randn(out_features, in_features)
        ))
        if bias:
            # NOTE(review): torch.Tensor(out_features) allocates uninitialized
            # memory and the bias is never explicitly initialized — confirm a
            # reset/initialization step exists elsewhere, otherwise values are
            # arbitrary.
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
    def forward(self, input):
        # Quadratic term: squared projections onto the rank factors, summed over rank.
        quad = ((input.matmul(self.quadratic_decomposed.transpose(1,0)).transpose(1, 0)) ** 2).sum(dim=1)
        linear = F.linear(input, self.weight, self.bias)
        return quad + linear
class View(nn.Module):
    """Reshape module: views the input as (batch, *shape), inferring the batch
    dimension."""
    def __init__(self, *shape):
        super(View, self).__init__()
        self.shape = shape
    def forward(self, input):
        return input.view(-1, *self.shape)
class Conv2dConvexQuadratic(nn.Module):
    '''Convolutional Input-Convex Quadratic Layer.

    Output = sum over `rank` of squared convolutions, plus a linear convolution;
    the result is convex in the input.
    '''
    def __init__(
        self, in_channels, out_channels, kernel_size, rank,
        stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'
    ):
        super(Conv2dConvexQuadratic, self).__init__()
        assert rank > 0
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.rank = rank
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.padding_mode = padding_mode
        # `rank` convolution copies per output channel; squared and summed in forward.
        self.quadratic_decomposed = nn.Conv2d(
            in_channels, out_channels * rank, kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
            bias=False,
            padding_mode=self.padding_mode
        )
        # Plain linear convolution term (carries the bias, if any).
        self.linear = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
            bias=self.bias,
            padding_mode=self.padding_mode
        )
    def forward(self, input):
        quad = self.quadratic_decomposed(input) ** 2
        n, c, h, w = quad.size()
        # Collapse the `rank` copies of each output channel by summation.
        quad = quad.reshape(n, c // self.rank, self.rank, h, w).sum(2)
        return quad + self.linear(input)
| 2,893
| 33.047059
| 105
|
py
|
Wasserstein2Barycenters
|
Wasserstein2Barycenters-main/src/icnn.py
|
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from .layers import ConvexQuadratic, Conv2dConvexQuadratic
class DenseICNN(nn.Module):
    '''Fully Conncted ICNN with input-quadratic skip connections.

    Each hidden layer receives a non-negative linear map of the previous layer
    plus a convex-quadratic skip connection from the input, keeping the output
    convex in the input (after convexify() clamps the linear weights).
    '''
    def __init__(
        self, in_dim,
        hidden_layer_sizes=[32, 32, 32],
        rank=1, activation='celu', dropout=0.03,
        strong_convexity=1e-6
    ):
        super(DenseICNN, self).__init__()
        self.strong_convexity = strong_convexity
        self.hidden_layer_sizes = hidden_layer_sizes
        # NOTE(review): attribute name 'droput' is a typo for 'dropout'; renaming
        # could break external readers, so it is kept as-is.
        self.droput = dropout
        self.activation = activation
        self.rank = rank
        # Convex-quadratic skip connections from the input to every hidden layer.
        self.quadratic_layers = nn.ModuleList([
            nn.Sequential(
                ConvexQuadratic(in_dim, out_features, rank=rank, bias=True),
                nn.Dropout(dropout)
            )
            for out_features in hidden_layer_sizes
        ])
        sizes = zip(hidden_layer_sizes[:-1], hidden_layer_sizes[1:])
        # Bias-free hidden-to-hidden maps; their weights are clamped >= 0 by convexify().
        self.convex_layers = nn.ModuleList([
            nn.Sequential(
                nn.Linear(in_features, out_features, bias=False),
                nn.Dropout(dropout)
            )
            for (in_features, out_features) in sizes
        ])
        self.final_layer = nn.Linear(hidden_layer_sizes[-1], 1, bias=False)
    def forward(self, input):
        """Evaluate the scalar convex potential; returns shape (batch, 1)."""
        output = self.quadratic_layers[0](input)
        for quadratic_layer, convex_layer in zip(self.quadratic_layers[1:], self.convex_layers):
            output = convex_layer(output) + quadratic_layer(input)
            # Convexity-preserving (convex, non-decreasing) activations only.
            if self.activation == 'celu':
                output = torch.celu(output)
            elif self.activation == 'softplus':
                output = F.softplus(output)
            elif self.activation == 'relu':
                output = F.relu(output)
            else:
                raise Exception('Activation is not specified or unknown.')
        # Quadratic regularizer guarantees strong convexity of the potential.
        return self.final_layer(output) + .5 * self.strong_convexity * (input ** 2).sum(dim=1).reshape(-1, 1)
    def push(self, input):
        """Gradient map x -> grad_x f(x), i.e. the transport map induced by the potential.
        NOTE(review): grad_outputs is hard-coded to .cuda(), so push requires a GPU."""
        output = autograd.grad(
            outputs=self.forward(input), inputs=input,
            create_graph=True, retain_graph=True,
            only_inputs=True,
            grad_outputs=torch.ones((input.size()[0], 1)).cuda().float()
        )[0]
        return output
    def convexify(self):
        """Clamp hidden-to-hidden and final weights to be non-negative, restoring convexity."""
        for layer in self.convex_layers:
            for sublayer in layer:
                if (isinstance(sublayer, nn.Linear)):
                    sublayer.weight.data.clamp_(0)
        self.final_layer.weight.data.clamp_(0)
class View(nn.Module):
    """Reshape layer: views the input as (-1, *shape), inferring the batch axis."""
    def __init__(self, *shape):
        super(View, self).__init__()
        # Trailing target shape; empty means "flatten to 1-D".
        self.shape = shape

    def forward(self, input):
        target_shape = (-1,) + tuple(self.shape)
        return input.view(*target_shape)
class ConvICNN128(nn.Module):
    """Convolutional ICNN for `channels` x 128 x 128 images.

    The outputs of a linear stem and a squared stem are summed and fed into
    a strided convolutional trunk (`convex`), whose conv/linear weights are
    clamped non-negative by `convexify()` to keep the network input-convex.
    """
    def __init__(self, channels=3):
        super(ConvICNN128, self).__init__()
        # Linear stem (only the last conv carries a bias).
        self.first_linear = nn.Sequential(
            nn.Conv2d(channels, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True),
        )
        # Stem whose output is squared in forward() (input-quadratic term).
        self.first_squared = nn.Sequential(
            nn.Conv2d(channels, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
        )
        # Five stride-2 convs reduce 128x128 -> 4x4 spatial, then an MLP head.
        # NOTE(review): View(32 * 8 * 8) == 2048 == 128 * 4 * 4, so the
        # flatten size matches, but the written factors are misleading.
        self.convex = nn.Sequential(
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3,stride=2, bias=True, padding=1),
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3,stride=2, bias=True, padding=1),
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3,stride=2, bias=True, padding=1),
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3,stride=2, bias=True, padding=1),
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3,stride=2, bias=True, padding=1),
            nn.CELU(),
            View(32* 8 * 8),
            nn.CELU(),
            nn.Linear(32 * 8 * 8, 128),
            nn.CELU(),
            nn.Linear(128, 64),
            nn.CELU(),
            nn.Linear(64, 32),
            nn.CELU(),
            nn.Linear(32, 1),
            View()
        ).cuda()  # NOTE(review): hard-coded to GPU -- confirm before CPU use.

    def forward(self, input):
        """Return the scalar convex potential for each image in the batch."""
        # Linear stem + elementwise square of the quadratic stem.
        output = self.first_linear(input) + self.first_squared(input) ** 2
        output = self.convex(output)
        return output

    def push(self, input):
        """Return grad_x f(x): the transport map induced by the potential (GPU only)."""
        return autograd.grad(
            outputs=self.forward(input), inputs=input,
            create_graph=True, retain_graph=True,
            only_inputs=True, grad_outputs=torch.ones(input.size()[0]).cuda().float()
        )[0]

    def convexify(self):
        """Clamp every conv/linear weight in the trunk to be non-negative."""
        for layer in self.convex:
            if (isinstance(layer, nn.Linear)) or (isinstance(layer, nn.Conv2d)):
                layer.weight.data.clamp_(0)
class ConvICNN16(nn.Module):
    '''
    ConvICNN for 1 x 16 x 16 images.
    Several convolutional layers with input-quadratic convolutional skip connections are
    followed by positive fully-connected layers.
    '''
    def __init__(self, strong_convexity=0.01, dropout=0.01, rank=1, unflatten=True):
        super(ConvICNN16, self).__init__()
        self.strong_convexity = strong_convexity
        self.dropout = dropout
        self.rank = rank
        # When True, forward() reshapes flat 256-vectors into 1x16x16 images.
        self.unflatten = unflatten
        # Hidden-to-hidden convolutions; weights must stay non-negative
        # (enforced by convexify()) to preserve input-convexity.
        self.convex_layers = nn.ModuleList([
            nn.Conv2d(512, 512, 3, padding=1, stride=2), # bs x 256 x 8 x 8
            nn.Conv2d(512, 512, 3, padding=1, stride=2), # bs x 256 x 8 x 8
        ])
        # Input-quadratic skip connections at three spatial resolutions.
        self.quadratic_layers = nn.ModuleList([
            Conv2dConvexQuadratic(1, 512, 5, rank=self.rank, padding=2, stride=1, bias=False), # bs x 128 x 16 x16
            Conv2dConvexQuadratic(1, 512, 7, rank=self.rank, padding=3, stride=2, bias=False), # bs x 128 x 8 x 8
            Conv2dConvexQuadratic(1, 512, 9, rank=self.rank, padding=4, stride=4, bias=False), # bs x 128 x 8 x 8
        ])
        # Positive head reducing to a single scalar per image.
        self.pos_features = nn.Sequential(
            nn.Dropout2d(self.dropout),
            nn.Conv2d(512, 256, 4, padding=1, stride=2),
            nn.CELU(0.2, True),
            nn.Dropout2d(self.dropout),
            nn.Conv2d(256, 1, 2, padding=0, stride=1), # bs x 1 x 1 x 1
            View(1),
        )
        # img = torch.randn(5, 1, 16, 16)
        # print(self(img).shape)
        # print('Input Quadratic Convolutions Output shapes')
        # for layer in self.quadratic_layers:
        #     print(layer(img).shape)
        # print('Sequential Convolutions Output shapes\nEmpty')
        # img = self.quadratic_layers[0](img)
        # for layer in self.convex_layers:
        #     img = layer(img)
        #     print(img.shape)
        # print('Final Shape')
        # print(self.pos_features(img).shape)

    def forward(self, input):
        """Return the scalar convex potential for each (possibly flattened) image."""
        if self.unflatten:
            input = input.reshape(-1, 1, 16, 16)
        output = self.quadratic_layers[0](input)
        # Each stage: convex layer on the running features + a quadratic
        # skip connection straight from the input, then CELU.
        for quadratic_layer, convex_layer in zip(self.quadratic_layers[1:], self.convex_layers):
            output = convex_layer(output) + quadratic_layer(input)
            output = torch.celu(output)
            if self.training:
                output = F.dropout2d(output, p=self.dropout)
        output = self.pos_features(output)
        # Strong-convexity regularizer: 0.5 * sc * ||x||^2 per sample.
        return output + .5 * self.strong_convexity * (input ** 2).flatten(start_dim=1).sum(dim=1).reshape(-1, 1)

    def push(self, input):
        """Return grad_x f(x): the transport map induced by the potential.

        NOTE(review): hard-codes .cuda() for grad_outputs -- GPU inputs only.
        """
        output = autograd.grad(
            outputs=self.forward(input), inputs=input,
            create_graph=True, retain_graph=True,
            only_inputs=True,
            grad_outputs=torch.ones((input.size()[0], 1)).cuda().float()
        )[0]
        return output

    def convexify(self):
        """Clamp hidden-to-hidden and head weights to be non-negative."""
        for layer in self.convex_layers:
            if (isinstance(layer, nn.Linear)) or (isinstance(layer, nn.Conv2d)):
                layer.weight.data.clamp_(0)
        for layer in self.pos_features:
            if (isinstance(layer, nn.Linear)) or (isinstance(layer, nn.Conv2d)):
                layer.weight.data.clamp_(0)
| 8,460
| 37.990783
| 115
|
py
|
zind
|
zind-main/download_data.py
|
import argparse
import hashlib
import json
import logging
import os
import sys
import time
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from typing import Dict, List
import requests
from tqdm import tqdm as progress_bar
# Name of the log file created inside the output folder.
LOG_FILE_NAME = "zind.log"
# Bridge Data Output replication endpoint serving the ZInD metadata json.
BRIDGE_API_URL = (
    "https://api.bridgedataoutput.com/api/v2/OData/zgindoor/Indoor/replication"
)
# Maximum attempts for both the metadata json and each image download.
MAX_NUM_RETRIES = 3
JSON_REQUESTS_TIMEOUT = 120 # 120 seconds, will double after every retry
IMAGE_REQUESTS_TIMEOUT = 60 # 60 seconds, will double after every retry
# Per-output-folder bookkeeping file used to resume interrupted downloads.
DOWNLOAD_STATUS_FILENAME = "download_status.json"
# TODO Update size based on entire dataset, store ZInD version for the stats
logging.basicConfig(
    stream=sys.stdout,
    level=logging.INFO,
    format="%(asctime)s %(levelname)-8s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
def write_dict_to_json(value: Dict, dest_path: str):
    """
    Serialize `value` as JSON into the file at `dest_path`.

    :param value: Dict to write
    :param dest_path: Path of the output json file
    """
    with open(dest_path, "w") as json_file:
        json_file.write(json.dumps(value))
def create_folder(path: str):
    """
    Make the directory at `path` unless it already exists.

    :param path: folder path
    """
    if os.path.isdir(path):
        return
    os.mkdir(path)
def calculate_checksum(dest_path: str):
    """
    Compute the MD5 hex digest of the file at `dest_path`.

    :param dest_path: Path to the image.
    :return: MD5 checksum as a hex string
    """
    with open(dest_path, "rb") as fh:
        contents = fh.read()
    return hashlib.md5(contents).hexdigest()
def keep_required_keys(local_house_dict: Dict) -> Dict:
    """
    Filter the house-details dict down to the keys stored on disk.

    :param local_house_dict: House details
    :return: House details restricted to the required keys
    """
    keys_to_keep = {
        "merger",
        "redraw",
        "scale_meters_per_coordinate",
        "floorplan_to_redraw_transformation",
    }
    return {key: value for key, value in local_house_dict.items() if key in keys_to_keep}
def run_imap_unordered_multiprocessing(func, argument_list, num_processes) -> List:
    """
    Run `func` over `argument_list` in parallel using a thread pool,
    displaying a progress bar.

    :param func: Function to run
    :param argument_list: List of arguments to function
    :param num_processes: Number of worker threads to be used
    :return: Unordered list of results of the function
    """
    result_list_tqdm = []
    # FIX: use the pool as a context manager so the worker threads are
    # always terminated, even if iteration raises (original leaked the pool).
    with ThreadPool(processes=num_processes) as pool:
        for result in progress_bar(
            pool.imap_unordered(func=func, iterable=argument_list),
            total=len(argument_list),
        ):
            result_list_tqdm.append(result)
    return result_list_tqdm
def process_house(details: Dict, output_folder: str) -> List:
    """
    Create required folder structure to store images for the house
    Store details about the house in zind_data.json
    :param details: house details dict from ZInD response json
    :param output_folder: Folder to download images to
    :return: [(url,dest_path,checksum)] List of images to download for house
    """
    house_number = details["home_id"]
    house_path = os.path.join(output_folder, house_number)
    panos_path = os.path.join(house_path, "panos")
    floor_plan_path = os.path.join(house_path, "floor_plans")
    # NOTE(review): dict(details) is a *shallow* copy, so the nested
    # "image_path" rewrites below also mutate the caller's `details` dict --
    # confirm this is intended.
    local_house_details = dict(details)
    create_folder(house_path)
    create_folder(panos_path)
    create_folder(floor_plan_path)
    all_images_to_download = []
    # Walk floor -> complete room -> partial room -> pano, collecting every
    # pano image and rewriting its "image_path" to a house-relative path.
    for floor_name, floor_details in details["merger"].items():
        for complete_room_name, complete_room_details in floor_details.items():
            for (
                partial_room_name,
                partial_room_details,
            ) in complete_room_details.items():
                for pano_name, pano_details in partial_room_details.items():
                    pano_url = pano_details["image_path"]
                    pano_dest_name = f"{floor_name}_{partial_room_name}_{pano_name}.jpg"
                    pano_dest_path = os.path.join(panos_path, pano_dest_name)
                    pano_checksum = pano_details["checksum"]
                    all_images_to_download.append(
                        (pano_url, pano_dest_path, pano_checksum)
                    )
                    local_house_details["merger"][floor_name][complete_room_name][
                        partial_room_name
                    ][pano_name]["image_path"] = f"panos/{pano_dest_name}"
    # Same treatment for the per-floor raster floor-plan images.
    for floor_name, floor_details in details[
        "floorplan_to_redraw_transformation"
    ].items():
        image_url = floor_details["image_path"]
        floor_plan_relative_path = os.path.join("floor_plans", f"{floor_name}.png")
        local_house_details["floorplan_to_redraw_transformation"][floor_name][
            "image_path"
        ] = floor_plan_relative_path
        floor_plan_path = os.path.join(house_path, floor_plan_relative_path)
        floor_plan_checksum = floor_details["checksum"]
        all_images_to_download.append([image_url, floor_plan_path, floor_plan_checksum])
    # Persist the trimmed, relative-path version of the house details.
    house_details_path = os.path.join(house_path, "zind_data.json")
    local_house_details = keep_required_keys(local_house_details)
    write_dict_to_json(local_house_details, house_details_path)
    return all_images_to_download
def download_json_in_chunks(zind_url, headers, payload, dest_path):
    """Helper function to download the large ZInD json in chunks.
    :param zind_url: The Bridge API url
    :param headers: The requests headers including authentication
    :param payload: Any requests payload fields
    :param dest_path: Where the json will be saved to (for future use)
    :return: The parsed ZInD json as python dict
    """
    response = requests.get(
        zind_url,
        stream=True,
        headers=headers,
        data=payload,
        timeout=JSON_REQUESTS_TIMEOUT,
    )
    # content-length may be absent; the progress bar then has no total.
    total_size_in_bytes = int(response.headers.get("content-length", 0))
    block_size = 1024 # 1 Kb
    all_chunks = []
    # Stream to disk while also keeping the chunks in memory for parsing,
    # so the payload is both cached and returned in one pass.
    with open(dest_path, "wb") as file:
        with progress_bar(
            total=total_size_in_bytes,
            unit="iB",
            unit_scale=True,
            desc="Downloading ZInD json",
        ) as pbar:
            for data in response.iter_content(block_size):
                pbar.update(len(data))
                file.write(data)
                all_chunks.append(data)
    full_content = b"".join(all_chunks)
    logger.info(f"Start loading ZInD json")
    result_dict = json.loads(full_content)
    logger.info(f"Done loading ZInD json")
    return result_dict
def get_zind_json(server_token, output_folder) -> Dict:
    """
    Return the dict for the ZInD json.

    Sends a request to the Bridge API to get details about the ZInD dataset
    and stores the response json in the output folder. A previously
    downloaded response is reused if it parses successfully.

    :param server_token: token for access to the API
    :param output_folder: path to store response
    :return: The houses under the response's "value" key, or None on failure
    """
    dest_path = os.path.join(output_folder, "zind_response.json")
    result_dict = {}
    value_key = "value"
    # Reuse a cached response when one exists and parses.
    if os.path.exists(dest_path):
        logger.info(f"Loading ZInD json from {dest_path}")
        try:
            result_dict = json.load(open(dest_path))
            logger.info("Loaded ZInD json successfully")
        except Exception as e:
            logger.info(f"ZInD json invalid, re-downloading file: {e}")
    zind_url = BRIDGE_API_URL
    bearer_token = f"Bearer {server_token}"
    payload = {}
    headers = {"Authorization": bearer_token}
    # BUGFIX: the original iterated over the tuple (1, MAX_NUM_RETRIES + 1)
    # -- i.e. attempts "1" and "4" only -- instead of range(1, MAX + 1).
    for retry_count in range(1, MAX_NUM_RETRIES + 1):
        if value_key in result_dict:
            break
        logger.info(
            f"Retrieving ZInD json (attempt {retry_count} out of {MAX_NUM_RETRIES})"
        )
        result_dict = download_json_in_chunks(zind_url, headers, payload, dest_path)
        logger.info("Downloaded ZInD json successfully")
    # BUGFIX: the original used for/else, which reported failure even when
    # the *last* attempt succeeded; check the payload explicitly instead.
    if value_key not in result_dict:
        logger.error(
            "Could not download ZInD json, please check your credentials and internet connection"
        )
        return None
    return result_dict[value_key]
def download_image(url_dest_path) -> bool:
    """
    Downloads file from the url and stores it at the dest path
    :param url_dest_path: [url,dest_path,checksum]
    :return: Returns True is file downloaded successfully
    """
    # Check if file exists is outside the function as we don't want to start another process
    # if file already exists
    url, dest_path, checksum = url_dest_path
    md5_returned = None
    # Retry with a growing timeout and a growing back-off sleep between tries.
    for retry_count in range(1, MAX_NUM_RETRIES + 1):
        try:
            response = requests.get(
                url, stream=True, timeout=retry_count * IMAGE_REQUESTS_TIMEOUT
            )
            if response.status_code == requests.codes.ok:
                with open(dest_path, "wb") as f:
                    for data in response:
                        f.write(data)
                md5_returned = calculate_checksum(dest_path)
        except Exception as e:
            logger.debug(
                f"Exception raised when downloading image {url} to {dest_path}: {str(e)}"
            )
        else:
            # try/else: no exception was raised; accept the file only if both
            # the HTTP status and the md5 checksum check out.
            if response.status_code == requests.codes.ok and md5_returned == checksum:
                break
            else:
                logger.debug(
                    f"Verification failed when downloading image {url} to {dest_path}: {response}"
                )
        logger.debug(f"Retry {retry_count} downloading image {url} to {dest_path}")
        time.sleep(retry_count * 30)
    else:
        # for/else: every retry was consumed without a successful break.
        logger.debug(
            f"Failed to download image {url} to {dest_path} after {MAX_NUM_RETRIES} attempts."
        )
    # Final on-disk verification regardless of how the loop exited.
    if not os.path.exists(dest_path):
        logger.error(f"Failed to download image {url} to {dest_path}")
        return False
    if md5_returned != checksum:
        logger.error(
            f"Checksum validation failed for {url} to {dest_path}: {md5_returned} vs {checksum}"
        )
        return False
    if retry_count > 1:
        logger.debug(
            f"Successfully downloaded image {url} to {dest_path} after {retry_count - 1} attempt(s)."
        )
    logger.debug(f"Successfully downloaded & verified image from: {url} to {dest_path}")
    return True
def create_folder_structure_and_image_download_list(
    houses: List, output_folder: str, partial_download_percentage: float
) -> List:
    """
    Create folders and zind_data files for all the houses in the output folder.
    Store on disk and return list of images to download
    :param houses: List of Houses returned in the ZInD Dict
    :param output_folder: Path to store dataset
    :param partial_download_percentage: Record percentage downloaded to ensure complete dataset is downloaded
        If this value is 100% entire dataset is to be downloaded
    :return: List[[url, dest_path]] List of all images to download for entire dataset
    """
    download_status_path = os.path.join(output_folder, DOWNLOAD_STATUS_FILENAME)
    download_percentage_key = "partial_download_percentage"
    files_list_key = "files_list"
    # Reuse the cached file list only if it was built for the same percentage.
    if os.path.exists(download_status_path):
        logger.info("Folder structure already exists, loading file list")
        download_status = json.load(open(download_status_path))
        # NOTE(review): raises KeyError if the status file predates the
        # percentage field -- confirm old status files are not in circulation.
        if download_status[download_percentage_key] == partial_download_percentage:
            return download_status[files_list_key]
    all_images_to_download = []
    with progress_bar(
        total=len(houses), desc="Preparing the ZInD folder structure"
    ) as pbar:
        for house_details in houses:
            images_to_download = process_house(house_details, output_folder)
            all_images_to_download = all_images_to_download + images_to_download
            pbar.update(1)
    # Persist the full list so subsequent runs can resume without re-walking.
    download_status = {
        files_list_key: all_images_to_download,
        download_percentage_key: partial_download_percentage,
    }
    logger.info("Required ZInD folder structure has been created")
    write_dict_to_json(download_status, download_status_path)
    return all_images_to_download
def check_files_left_to_download(image_list: List) -> List:
    """
    Scan the full dataset image list and return the entries still needing a
    download: files missing on disk, or present with a wrong md5 checksum
    (those will be re-downloaded).

    :param image_list: List of (url, dest_path, checksum) entries
    :return: List of entries left to download
    """
    remaining = []
    with progress_bar(total=len(image_list), desc="Verifying downloaded data") as pbar:
        for image_url, dest_path, checksum in image_list:
            if os.path.exists(dest_path):
                if calculate_checksum(dest_path) != checksum:
                    remaining.append((image_url, dest_path, checksum))
                    logger.warning(f"Image is invalid, re-downloading {dest_path}")
            else:
                remaining.append((image_url, dest_path, checksum))
            pbar.update(1)
    return remaining
def download_all(
    output_folder: str,
    server_token: str,
    num_process: int,
    partial_download_percentage=100.0,
):
    """
    Download all images in the ZInD dataset.

    :param output_folder: Folder to store dataset
    :param server_token: Token to access API
    :param num_process: Num of process to use while downloading in parallel
    :param partial_download_percentage: Percentage of houses to download
    """
    houses = get_zind_json(server_token, output_folder)
    if houses is None:
        return
    houses = partial_download(partial_download_percentage, houses)
    all_images_to_download = create_folder_structure_and_image_download_list(
        houses, output_folder, partial_download_percentage
    )
    total_num_images = len(all_images_to_download)
    # Resume support: only fetch images missing on disk or failing checksum.
    images_remaining = check_files_left_to_download(all_images_to_download)
    successful_downloads = 0
    if len(images_remaining) > 0:
        if len(images_remaining) != total_num_images:
            logger.info(
                "{:.2f}% images left, now resuming to download the rest of the images".format(
                    len(images_remaining) * 100 / total_num_images
                )
            )
        logger.info(f"Images to download: {len(images_remaining)}/{total_num_images}")
        results = run_imap_unordered_multiprocessing(
            download_image, images_remaining, num_process
        )
        for r in results:
            if r:
                successful_downloads += 1
    if successful_downloads == len(images_remaining):
        logger.info(f"All {total_num_images} images downloaded & verified successfully")
    else:
        # This situation should be rare and users are advised to retry.
        # BUGFIX: count failures against the images attempted in *this* run
        # (previously counted against the full dataset size, which overstated
        # the failures when resuming a partially downloaded dataset).
        logger.info(
            f"There were issues with downloading the data, {len(images_remaining) - successful_downloads} images"
            f" left to download. Please, retry running the download script."
        )
def partial_download(percentage_to_download: float, houses: List) -> List:
    """Return the first `percentage_to_download` percent of `houses` (always at least one)."""
    fraction = percentage_to_download / 100
    requested = int(fraction * len(houses))
    # Never return an empty selection.
    return houses[:max(1, requested)]
def main():
    """Parse CLI arguments, configure logging and start the ZInD download."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--server_token", "-s", help="Server token for ZInD Access", required=True
    )
    parser.add_argument(
        "--output_folder",
        "-o",
        help="Output folder to store downloaded images",
        required=True,
    )
    parser.add_argument(
        "--num_process",
        "-n",
        help="Number of process to use, by default uses maximum available",
        type=int,
        default=cpu_count(),
    )
    parser.add_argument(
        "-v", "--verbose", help="Show debug information", action="store_true"
    )
    parser.add_argument(
        "--partial_download_percentage",
        "-p",
        help="Percentage of houses to download from dataset",
        type=float,
        default=100.0,
    )
    args = parser.parse_args()
    # Console logging stays at INFO unless --verbose is passed.
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    logger_path = os.path.join(args.output_folder, LOG_FILE_NAME)
    create_folder(args.output_folder)
    # Create file handler which always logs debug messages.
    fh = logging.FileHandler(logger_path)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    download_all(
        args.output_folder,
        args.server_token,
        args.num_process,
        args.partial_download_percentage,
    )
if __name__ == "__main__":
    main()
| 16,737
| 33.654244
| 119
|
py
|
zind
|
zind-main/code/partition_zind.py
|
# """ Script to partition ZInD to train/val/test (0.8 : 0.1 : 0.1).
# The train/val/test splits will have similar distributions under the following metrics:
# 1. Layout complexities (cuboid, L-shape, etc.)
# 2. Number of floors
# 3. Number of primary panoramas
# 4. Number of secondary panoramas
# 5. Total area (to ensure that we have good balance between small/large homes)
# Example usage:
# python partition_zind.py -i <input_folder> -o <output_folder>
#
import argparse
import json
import os
import random
import numpy as np
from tqdm import tqdm
from shapely.geometry import Polygon
import cv2
# Map from layout-type name to the integer bin used for histogram statistics.
LAYOUT_TYPES_DICT = {'CUBOID': 0,
                     'MANHATTAN_L': 1,
                     'MANHATTAN_GENERAL': 2,
                     'NON_MANHATTAN': 3}
def angle_between_vectors(vector1, vector2):
    """Return the (unsigned) angle in radians between vector1 and vector2."""
    unit1 = vector1 / np.linalg.norm(vector1)
    unit2 = vector2 / np.linalg.norm(vector2)
    # Clip guards against tiny numerical overshoot outside [-1, 1].
    cos_angle = np.clip(np.dot(unit1, unit2), -1, 1)
    return np.arccos(cos_angle)
def get_angle_distribution(room_vertices):
    """Return the interior angle (degrees) at each vertex of the room polygon."""
    num_vertices = room_vertices.shape[0]
    angles = []
    for idx in range(num_vertices):
        # Edge vectors from the current vertex towards its two neighbours
        # (indices wrap around the polygon).
        to_next = room_vertices[(idx + 1) % num_vertices, :] - room_vertices[idx % num_vertices, :]
        to_prev = room_vertices[(idx - 1) % num_vertices, :] - room_vertices[idx % num_vertices, :]
        angles.append(np.rad2deg(angle_between_vectors(to_next, to_prev)))
    return angles
def get_layout_type(room_vertices, threshold_degrees=10):
    """
    Classify a room shape: CUBOID, MANHATTAN_L, MANHATTAN_GENERAL or NON_MANHATTAN.

    :param room_vertices: Coordinates of room vertices.
    :param threshold_degrees: Max deviation from a multiple of 90 degrees
        before a wall angle counts as non-Manhattan.
    :return: Room layout type.
    """
    angles = get_angle_distribution(room_vertices)
    remainder = np.mod(angles, 90)
    # Distance of each angle to its nearest multiple of 90 degrees.
    deviation_from_manhattan = np.minimum(remainder, 90 - remainder)
    if np.any(deviation_from_manhattan > threshold_degrees):
        return 'NON_MANHATTAN'
    num_vertices = room_vertices.shape[0]
    if num_vertices == 4:
        return 'CUBOID'
    if num_vertices == 6:
        return 'MANHATTAN_L'
    return 'MANHATTAN_GENERAL'
def collect_stats(input_folder, tour_ids_split):
    """Collect stats for input home tours.
    :param input_folder: The folder contains the json files for home tours.
    :param tour_ids_split: Tour ids for train, val, or test.
    :return: Home tours stats: [num_floors, total_sqm, num_primary_panos,
        num_secondary_panos, layout_type] lists (one entry per tour, except
        layout_type which has one entry per primary pano).
    """
    num_floors_list = []
    total_sqm_list = []
    num_primary_pano_list = []
    num_secondary_pano_list = []
    layout_type_list = []
    for tour_id in tqdm(tour_ids_split):
        tour_json_path = os.path.join(input_folder, tour_id, "zind_data.json")
        with open(tour_json_path, "r") as fh:
            zillow_json_dict = json.load(fh)
        num_floors = 0
        total_sqm = 0
        num_primary_panos = 0
        num_secondary_panos = 0
        # Tours with any un-scaled floor are excluded from the area stats.
        invalid_floor_scale = False
        if "merger" in zillow_json_dict:
            for floor_id, floor_data in zillow_json_dict["merger"].items():
                num_floors += 1
                scale = zillow_json_dict["scale_meters_per_coordinate"][floor_id]
                if scale is None:
                    invalid_floor_scale = True
                # Walk complete room -> partial room -> pano.
                for complete_room_data in floor_data.values():
                    for partial_room_data in complete_room_data.values():
                        for pano_data in partial_room_data.values():
                            if pano_data["is_primary"]:
                                num_primary_panos += 1
                                layout_type = get_layout_type(np.array(pano_data["layout_raw"]["vertices"]))
                                layout_type_list.append(LAYOUT_TYPES_DICT[layout_type])
                                if scale is not None:
                                    # Convert room-local coordinates to meters
                                    # before computing the polygon area.
                                    room_scale = pano_data["floor_plan_transformation"]["scale"]
                                    room_vertices_scaled = [[vertex[0] * room_scale * scale, vertex[1] * room_scale * scale] for vertex in pano_data["layout_raw"]["vertices"]]
                                    room_polygon = Polygon(room_vertices_scaled)
                                    total_sqm += room_polygon.area
                            else:
                                num_secondary_panos += 1
        num_floors_list.append(num_floors)
        if not invalid_floor_scale:
            total_sqm_list.append(total_sqm)
        num_primary_pano_list.append(num_primary_panos)
        num_secondary_pano_list.append(num_secondary_panos)
    return [num_floors_list, total_sqm_list, num_primary_pano_list, num_secondary_pano_list, layout_type_list]
def main():
    """Randomly search for a train/val/test split of ZInD (0.8:0.1:0.1) whose
    per-metric histograms are maximally similar across the three splits."""
    parser = argparse.ArgumentParser(description="Partition Zillow Indoor Dataset (ZInD)")
    parser.add_argument(
        "--input", "-i", help="Input folder contains all the home tours.", required=True
    )
    parser.add_argument(
        "--output", "-o", help="Output folder where zind_partition.json will be saved to", required=True
    )
    args = parser.parse_args()
    input_folder = args.input
    output_folder = args.output
    os.makedirs(output_folder, exist_ok=True)
    tour_ids =[tour_id for tour_id in os.listdir(input_folder) if os.path.isdir(os.path.join(input_folder, tour_id))]
    num_tours = len(tour_ids)
    train_ratio = 0.8
    val_ratio = 0.1
    test_ratio = 0.1
    assert train_ratio + val_ratio + test_ratio == 1
    num_tours_train = int(num_tours * train_ratio)
    num_tours_val = int(num_tours * val_ratio)
    best_hist_score = 0
    hist_score_list = []
    # Histogram bin counts per metric: floors, area, primary panos,
    # secondary panos, layout types.
    bins_list = [4, 10, 10, 10, 4]
    # Try 50 random shuffles; keep the split with the highest summed
    # histogram correlation between the three subsets.
    for _ in range(50):
        random.shuffle(tour_ids)
        tour_ids_train = tour_ids[:num_tours_train]
        tour_ids_val = tour_ids[num_tours_train:num_tours_train + num_tours_val]
        tour_ids_test = tour_ids[num_tours_train + num_tours_val:]
        stats_list_train = collect_stats(input_folder, tour_ids_train)
        stats_list_val = collect_stats(input_folder, tour_ids_val)
        stats_list_test = collect_stats(input_folder, tour_ids_test)
        hist_score = 0
        for stats_train, stats_val, stats_test, bins in zip(stats_list_train, stats_list_val, stats_list_test, bins_list):
            # Shared histogram range so the three distributions are comparable.
            max_val = max(max(stats_train), max(stats_val), max(stats_test))
            min_val = min(min(stats_train), min(stats_val), min(stats_test))
            hist_train, _ = np.histogram(stats_train, bins=bins, range=(min_val, max_val), density=True)
            hist_val, _ = np.histogram(stats_val, bins=bins, range=(min_val, max_val), density=True)
            hist_test, _ = np.histogram(stats_test, bins=bins, range=(min_val, max_val), density=True)
            # method=0 is cv2.HISTCMP_CORREL (correlation; higher is better).
            hist_score_train_val = cv2.compareHist(hist_train.astype(np.float32), hist_val.astype(np.float32), method=0)
            hist_score_train_test = cv2.compareHist(hist_train.astype(np.float32), hist_test.astype(np.float32), method=0)
            hist_score_val_test = cv2.compareHist(hist_val.astype(np.float32), hist_test.astype(np.float32), method=0)
            hist_score_train_val_test = (hist_score_train_val + hist_score_train_test + hist_score_val_test) / 3
            hist_score += hist_score_train_val_test
        hist_score_list.append(hist_score)
        # NOTE(review): if no shuffle ever beats 0 (possible with negative
        # correlations), best_tour_ids_* stay unbound and the save below
        # raises NameError -- confirm this cannot happen in practice.
        if hist_score > best_hist_score:
            best_hist_score = hist_score
            best_tour_ids_train = tour_ids_train
            best_tour_ids_val = tour_ids_val
            best_tour_ids_test = tour_ids_test
    # save zind partition
    zind_partition = {
        "train": best_tour_ids_train,
        "val": best_tour_ids_val,
        "test": best_tour_ids_test,
    }
    with open(os.path.join(args.output, "zind_partition.json"), "w") as fh:
        json.dump(zind_partition, fh)
if __name__ == "__main__":
    main()
| 8,190
| 39.349754
| 175
|
py
|
zind
|
zind-main/code/visualize_zind_cli.py
|
# """CLI script to visualize & validate data for the public-facing Zillow Indoor Dataset (ZInD).
#
# Validation includes:
# (1) required JSON fields are presented
# (2) verify non self-intersection of room floor_plan_layouts
# (3) verify that windows/doors/openings lie on the room layout geometry
# (4) verify that windows/doors/openings are defined by two points (left/right boundaries)
# (5) verify that panos_layouts are RGB images with valid FoV ratio (2:1)
#
# Visualization includes:
# (1) render the top-down floor map projection: merged room floor_plan_layouts,WDO and camera centers
# (2) render the room floor_plan_layouts and windows/doors/openings on the pano
#
# Example usage (1): Render all layouts on primary and secondary panos.
# python visualize_zind_cli.py -i <input_folder> -o <output_folder> --visualize-layout --visualize-floor-plan \
# --raw --complete --visible --primary --secondary
#
# Example usage (2): Render all vector layouts using merger (based on raw or complete) and the final redraw layouts.
# python visualize_zind_cli.py -i <input_folder> -o <output_folder> --visualize-floor-plan --redraw --complete --raw
#
# Example usage (3): Render the raster to vector alignments using merger (based on raw or complete) and final redraw.
# python visualize_zind_cli.py -i <input_folder> -o <output_folder> --visualize-raster --redraw --complete --raw
#
import argparse
import logging
import os
import sys
import traceback
from pathlib import Path
from typing import Dict, Any
from floor_plan import FloorPlan
from render import (
render_room_vertices_on_panos,
render_jpg_image,
render_raster_to_vector_alignment,
)
from tqdm import tqdm
# Log to stdout at INFO; --debug raises the module logger to DEBUG.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
# Sub-folder name for rendered outputs.
RENDER_FOLDER = "render_data"
def validate_and_render(
    zillow_floor_plan: "FloorPlan",
    *,
    input_folder: str,
    output_folder: str,
    args: Dict[str, Any]
):
    """Validate and render various ZInD elements, e.g.
    1. Primary/secondary layout and WDO
    2. Raw/complete/visible layouts
    3. Top-down merger results (draft floor-plan)
    4. Top-down redraw results (final floor-plan)
    5. Raster to vector alignment results.
    :param zillow_floor_plan: ZInD floor plan object.
    :param input_folder: Input folder of the current tour.
    :param output_folder: Folder where the renderings will be saved.
    :param args: Input arguments to the script.
    :return: None
    """
    # Get the types of floor_plan_layouts that we should render.
    geometry_to_visualize = []
    if args.raw:
        geometry_to_visualize.append("raw")
    if args.complete:
        geometry_to_visualize.append("complete")
    if args.visible:
        geometry_to_visualize.append("visible")
    if args.redraw:
        geometry_to_visualize.append("redraw")
    # Get the types of panos_layouts that we should render.
    panos_to_visualize = []
    if args.primary:
        panos_to_visualize.append("primary")
    if args.secondary:
        panos_to_visualize.append("secondary")
    # Render the room shape layouts + WDO on top of the pano textures.
    if args.visualize_layout:
        for geometry_type in geometry_to_visualize:
            # "redraw" is a floor-plan-only geometry; no per-pano layouts.
            if geometry_type == "redraw":
                continue
            for pano_type in panos_to_visualize:
                output_folder_layout = os.path.join(
                    output_folder, "layout", geometry_type, pano_type
                )
                os.makedirs(output_folder_layout, exist_ok=True)
                panos_list = zillow_floor_plan.panos_layouts[geometry_type][pano_type]
                render_room_vertices_on_panos(
                    input_folder=zillow_floor_plan.input_folder,
                    panos_list=panos_list,
                    output_folder=output_folder_layout,
                )
    # Render the top-down draft floor plan, result of the merger stage.
    if args.visualize_floor_plan:
        output_folder_floor_plan = os.path.join(output_folder, "floor_plan")
        os.makedirs(output_folder_floor_plan, exist_ok=True)
        for geometry_type in geometry_to_visualize:
            # "visible" geometry has no top-down floor-plan representation.
            if geometry_type == "visible":
                continue
            zind_dict = zillow_floor_plan.floor_plan_layouts[geometry_type]
            # One rendering per floor.
            for floor_id, zind_poly_list in zind_dict.items():
                output_file_name = os.path.join(
                    output_folder_floor_plan,
                    "vector_{}_layout_{}.jpg".format(geometry_type, floor_id),
                )
                render_jpg_image(
                    polygon_list=zind_poly_list, jpg_file_name=output_file_name
                )
    # Render vector geometry on top of the raster floor plan image.
    if args.visualize_raster:
        output_folder_floor_plan_alignment = os.path.join(
            output_folder, "floor_plan_raster_to_vector_alignment"
        )
        os.makedirs(output_folder_floor_plan_alignment, exist_ok=True)
        for geometry_type in geometry_to_visualize:
            if geometry_type == "visible":
                continue
            for (
                floor_id,
                raster_to_vector_transformation,
            ) in zillow_floor_plan.floor_plan_to_redraw_transformation.items():
                floor_plan_image_path = os.path.join(
                    input_folder, zillow_floor_plan.floor_plan_image_path[floor_id]
                )
                zind_poly_list = zillow_floor_plan.floor_plan_layouts[geometry_type][
                    floor_id
                ]
                output_file_name = os.path.join(
                    output_folder_floor_plan_alignment,
                    "raster_to_vector_{}_layout_{}.jpg".format(geometry_type, floor_id),
                )
                render_raster_to_vector_alignment(
                    zind_poly_list,
                    raster_to_vector_transformation,
                    floor_plan_image_path,
                    output_file_name,
                )
def main():
    """Entry point: validate ZInD tours and render the requested visualizations.

    Walks the input path for ``zind_data.json`` files, builds a FloorPlan for
    each one and calls validate_and_render on it. Failures are logged and
    tallied rather than aborting the whole batch.
    """
    parser = argparse.ArgumentParser(
        description="Visualize & validate Zillow Indoor Dataset (ZInD)"
    )
    parser.add_argument(
        "--input",
        "-i",
        help="Input JSON file (or folder with ZInD data)",
        required=True,
    )
    parser.add_argument(
        "--output",
        "-o",
        help="Output folder where rendered data will be saved to",
        required=True,
    )
    parser.add_argument(
        "--visualize-layout",
        action="store_true",
        help="Render room vertices and WDO on panoramas.",
    )
    parser.add_argument(
        "--visualize-floor-plan",
        action="store_true",
        help="Render the floor plans as top-down projections with floor plan layouts and WDO elements.",
    )
    parser.add_argument(
        "--visualize-raster",
        action="store_true",
        help="Render the vector floor plan (draft or final) on the raster floor plan image.",
    )
    # Bug fix: without type=float a user-supplied --max-tours arrives as str and
    # the "num_success >= max_tours_to_process" comparison below raises TypeError.
    parser.add_argument(
        "--max-tours", type=float, default=float("inf"), help="Max tours to process."
    )
    parser.add_argument(
        "--primary", action="store_true", help="Visualize primary panoramas."
    )
    parser.add_argument(
        "--secondary", action="store_true", help="Visualize secondary panoramas."
    )
    parser.add_argument("--raw", action="store_true", help="Visualize raw layout.")
    parser.add_argument(
        "--complete", action="store_true", help="Visualize complete layout."
    )
    parser.add_argument(
        "--visible", action="store_true", help="Visualize visible layout."
    )
    parser.add_argument(
        "--redraw", action="store_true", help="Visualize 2D redraw geometry."
    )
    parser.add_argument(
        "--debug", "-d", action="store_true", help="Set log level to DEBUG"
    )
    args = parser.parse_args()
    if args.debug:
        LOG.setLevel(logging.DEBUG)
    # Renamed from "input" so the builtin input() is not shadowed.
    input_path = args.input
    # Useful to debug, by restricting the number of tours to process.
    max_tours_to_process = args.max_tours
    # Collect all the feasible input JSON files.
    input_files_list = [input_path]
    if Path(input_path).is_dir():
        input_files_list = sorted(Path(input_path).glob("**/zind_data.json"))
    num_failed = 0
    num_success = 0
    failed_tours = []
    for input_file in tqdm(input_files_list, desc="Validating ZInD data"):
        # Try loading and validating the file.
        try:
            zillow_floor_plan = FloorPlan(input_file)
            current_input_folder = str(Path(input_file).parent)
            current_output_folder = os.path.join(
                args.output, RENDER_FOLDER, str(Path(input_file).parent.stem)
            )
            os.makedirs(current_output_folder, exist_ok=True)
            validate_and_render(
                zillow_floor_plan,
                input_folder=current_input_folder,
                output_folder=current_output_folder,
                args=args,
            )
            num_success += 1
            if num_success >= max_tours_to_process:
                LOG.info("Max tours to process reached {}".format(num_success))
                break
        except Exception as ex:
            # Best-effort batch processing: record the failure and continue with
            # the next tour instead of aborting the run.
            failed_tours.append(str(Path(input_file).parent.stem))
            num_failed += 1
            track = traceback.format_exc()
            LOG.warning("Error validating {}: {}".format(input_file, str(ex)))
            LOG.debug(track)
            continue
    if num_failed > 0:
        LOG.warning("Failed to validate: {}".format(num_failed))
        LOG.debug("Failed_tours: {}".format(failed_tours))
    else:
        LOG.info("All ZInD validated successfully")
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 9,753
| 33.711744
| 117
|
py
|
zind
|
zind-main/code/render.py
|
"""
This module contains some common rendering routines.
"""
import itertools
import logging
import os
import sys
from typing import List, Dict, Any
import cv2
import numpy as np
from pano_image import PanoImage
from transformations import TransformationSpherical, Transformation3D
from utils import Polygon, PolygonType
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
# Default parameters when drawing ZInD floor plans.
DEFAULT_LINE_THICKNESS = 4
DEFAULT_RENDER_RESOLUTION = 2048
# Polygon colors when we render the floor map as a JPG image.
POLYGON_COLOR = {
PolygonType.ROOM: (0, 0, 0), # Black
PolygonType.WINDOW: (0, 255, 255), # Cyan
PolygonType.DOOR: (255, 255, 0), # Yellow-ish
PolygonType.OPENING: (0, 0, 255), # Blue
PolygonType.PRIMARY_CAMERA: (255, 64, 255), # Purple
PolygonType.SECONDARY_CAMERA: (0, 128, 0), # Green
PolygonType.PIN_LABEL: (0, 0, 0), # Black
}
def render_jpg_image(
    polygon_list: List[Polygon],
    *,
    jpg_file_name: str = None,
    thickness: int = DEFAULT_LINE_THICKNESS,
    output_width: int = DEFAULT_RENDER_RESOLUTION
):
    """Render a set of ZInD polygon objects to an image that can be saved to the file system.
    :param polygon_list: List of Polygon objects.
    :param jpg_file_name: File name to save the image to (if None we won't save).
    :param thickness: The line thickness when drawing the polygons.
    :param output_width: The default output resolution.
    :return: An OpenCV image object.
    """
    # Compute the bounding-box upper-left corner over all polygon points.
    # NOTE(review): assumes polygon_list is non-empty and each polygon has at
    # least one point — an empty list raises IndexError here. TODO confirm callers.
    min_x = polygon_list[0].points[0][0]
    min_y = polygon_list[0].points[0][1]
    for polygon in polygon_list:
        for point in polygon.points:
            min_x = min(min_x, point[0])
            min_y = min(min_y, point[1])
    # Normalize based on the upper-left.
    polygon_list_points = []
    for polygon in polygon_list:
        polygon_modified = []
        for point in polygon.points:
            polygon_modified.append((point[0] - min_x, point[1] - min_y))
        polygon_list_points.append(polygon_modified)
    max_x = polygon_list_points[0][0][0]
    max_y = polygon_list_points[0][0][1]
    for point in itertools.chain.from_iterable(polygon_list_points):
        max_x = max(max_x, point[0])
        max_y = max(max_y, point[1])
    # Scale so the bounding box spans output_width pixels horizontally.
    # NOTE(review): divides by max_x — a degenerate zero-width floor plan would
    # raise ZeroDivisionError; TODO confirm this cannot occur in practice.
    resize_ratio = output_width / max_x
    # Normalize based on the max width.
    polygon_list_points_modified = []
    for polygon in polygon_list_points:
        polygon_modified = []
        for point in polygon:
            polygon_modified.append((point[0] * resize_ratio, point[1] * resize_ratio))
        polygon_list_points_modified.append(polygon_modified)
    polygon_list_points = polygon_list_points_modified
    max_x = max_x * resize_ratio
    max_y = max_y * resize_ratio
    # Rebuild the Polygon namedtuples with normalized points; the caller's
    # Polygon objects are left untouched.
    polygon_list = [
        p._replace(points=q) for p, q in zip(polygon_list, polygon_list_points)
    ]
    # White canvas sized to the (scaled) bounding box.
    img_floor_map = np.zeros([int(max_y + 1), int(max_x + 1), 3], dtype=np.uint8)
    img_floor_map[:] = (255, 255, 255)
    for polygon in polygon_list:
        polygon_points = polygon.points
        try:
            # Draw wall elements like windows/doors/openings with increased thickness for better visualization.
            wall_thickness = (
                thickness if polygon.type == PolygonType.ROOM else 2 * thickness
            )
            cv2.polylines(
                img_floor_map,
                [np.int_([polygon_points])],
                True,
                POLYGON_COLOR[polygon.type],
                thickness=wall_thickness,
                lineType=cv2.LINE_AA,
            )
            # Draw the line/polygon points as Red dots, unless this is the camera center or pin label.
            if (
                polygon.type != PolygonType.PRIMARY_CAMERA
                and polygon.type != PolygonType.SECONDARY_CAMERA
                and polygon.type != PolygonType.PIN_LABEL
            ):
                for point in polygon_points:
                    cv2.circle(
                        img_floor_map, tuple(np.int_(point)), thickness, (255, 0, 0), -1
                    )
            elif polygon.type == PolygonType.PIN_LABEL:
                # Offset the label text so it sits above and left of the pin position.
                pin_label_position = np.int_(polygon_points[0]) - np.array([40, 15])
                cv2.putText(
                    img_floor_map,
                    polygon.name,
                    tuple(pin_label_position),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1.0,
                    POLYGON_COLOR[polygon.type],
                    2,
                    cv2.LINE_AA,
                )
        except Exception as ex:
            # Best-effort rendering: a malformed polygon is logged and skipped,
            # not allowed to abort the whole floor plan render.
            LOG.debug("Error drawing {}: {} {}".format(jpg_file_name, polygon, str(ex)))
            continue
    # Convert from RGB to BGR (the default type OpenCV expects).
    img_floor_map = cv2.cvtColor(img_floor_map, cv2.COLOR_RGB2BGR)
    if jpg_file_name is not None:
        cv2.imwrite(jpg_file_name, img_floor_map)
    return img_floor_map
def render_room_vertices_on_panos(
    panos_list: List[Dict[str, Any]], *, input_folder: str, output_folder: str
):
    """Render room vertices (floor and ceiling) and WDO elements on the pano textures.
    :param panos_list: The list of collected per-pano data.
    :param input_folder: The input folder, used to locate the pano textures.
    :param output_folder: The output folder, where the rendered layouts will be saved to.
    :return: None
    """
    # Maps the per-pano JSON field names to the polygon categories used for colors.
    # NOTE(review): duplicates utils.PolygonTypeMapping — consider importing that instead.
    PolygonTypeMapping = {
        "windows": PolygonType.WINDOW,
        "doors": PolygonType.DOOR,
        "openings": PolygonType.OPENING,
    }
    for pano_data in panos_list:
        pano_id = pano_data["pano_id"]
        pano_image_path = os.path.join(input_folder, pano_data["image"])
        pano_image = PanoImage.from_file(pano_image_path)
        pano_width = pano_image.width
        # Lifts top-down 2D vertices to 3D floor/ceiling rings using the
        # per-pano camera and ceiling heights.
        transform = Transformation3D(
            camera_height=pano_data["camera_height"],
            ceiling_height=pano_data["ceiling_height"],
        )
        vertices_types_to_visualize = [pano_data["room_vertices"]]
        # visualize internal_vertices (if any)
        vertices_types_to_visualize.extend(pano_data["internal_vertices"])
        for vertices_to_visualize in vertices_types_to_visualize:
            floor_coordinates, ceiling_coordinates = transform.to_3d(
                vertices_to_visualize
            )
            floor_coords = TransformationSpherical.normalize(floor_coordinates)
            ceiling_coords = TransformationSpherical.normalize(ceiling_coordinates)
            assert floor_coords.shape[0] == ceiling_coords.shape[0]
            num_vertices = ceiling_coords.shape[0]
            for room_coords in [floor_coords, ceiling_coords]:
                for i in range(num_vertices):
                    # Edge from vertex i to vertex i+1 (wrapping to close the ring).
                    point_start = room_coords[[i], :].tolist()
                    point_end = room_coords[[(i + 1) % num_vertices], :].tolist()
                    point_start_pix = TransformationSpherical.cartesian_to_pixel(
                        np.asarray(point_start).reshape(1, 3), pano_width
                    )
                    point_end_pix = TransformationSpherical.cartesian_to_pixel(
                        np.asarray(point_end).reshape(1, 3), pano_width
                    )
                    # Draw a dotted spherical line to represent the proxy layout geometry.
                    pano_image.draw_dotted_line(point_start, point_end)
                    # Draw markers to represent the corners.
                    pano_image.draw_marker(point_start_pix[0])
                    pano_image.draw_marker(point_end_pix[0])
        for wdo_type in ["windows", "doors", "openings"]:
            # Skip floor_plan_layouts that might be missing this field.
            if wdo_type not in pano_data:
                continue
            wdo_vertices = pano_data[wdo_type]
            if len(wdo_vertices) == 0:
                continue
            # Each WDO is represented by three continuous elements:
            # (left boundary x,y); (right boundary x,y); (bottom boundary z, top boundary z)
            assert len(wdo_vertices) % 3 == 0
            num_wdo = len(wdo_vertices) // 3
            for wdo_idx in range(num_wdo):
                bottom_z = wdo_vertices[wdo_idx * 3 + 2][0]
                top_z = wdo_vertices[wdo_idx * 3 + 2][1]
                # wdo_bbox_3D contains four points at bottom left, bottom right, top right, top left
                wdo_bbox_3D = np.array(
                    [
                        [
                            wdo_vertices[wdo_idx * 3][0],
                            wdo_vertices[wdo_idx * 3][1],
                            bottom_z,
                        ],
                        [
                            wdo_vertices[wdo_idx * 3 + 1][0],
                            wdo_vertices[wdo_idx * 3 + 1][1],
                            bottom_z,
                        ],
                        [
                            wdo_vertices[wdo_idx * 3 + 1][0],
                            wdo_vertices[wdo_idx * 3 + 1][1],
                            top_z,
                        ],
                        [
                            wdo_vertices[wdo_idx * 3][0],
                            wdo_vertices[wdo_idx * 3][1],
                            top_z,
                        ],
                    ]
                )
                wdo_bbox_3D = TransformationSpherical.normalize(wdo_bbox_3D)
                # Two vertical edges (left/right jambs of the opening)...
                pano_image.draw_vertical_line(
                    (wdo_bbox_3D[0], wdo_bbox_3D[3]),
                    color=POLYGON_COLOR[PolygonTypeMapping[wdo_type]],
                )
                pano_image.draw_vertical_line(
                    (wdo_bbox_3D[1], wdo_bbox_3D[2]),
                    color=POLYGON_COLOR[PolygonTypeMapping[wdo_type]],
                )
                # ...and two spherical edges (bottom/top of the opening).
                pano_image.draw_spherical_line(
                    wdo_bbox_3D[0],
                    wdo_bbox_3D[1],
                    color=POLYGON_COLOR[PolygonTypeMapping[wdo_type]],
                )
                pano_image.draw_spherical_line(
                    wdo_bbox_3D[3],
                    wdo_bbox_3D[2],
                    color=POLYGON_COLOR[PolygonTypeMapping[wdo_type]],
                )
        output_file_name = os.path.join(output_folder, "{}_layout.jpg".format(pano_id))
        pano_image.write_to_file(output_file_name)
def render_raster_to_vector_alignment(
    room_wdo_poly_list: List[Polygon],
    transformation: np.ndarray,
    floorplan_image_file_name: str,
    output_file_name: str,
):
    """Render the raster to vector alignment as an image, using a transformation that maps the given vector
    representation to the raster floor plan image.
    Note that the alignment will not match perfectly due to the final floor plan clean-up stage introducing a
    variety of final touchups, such as fixing misalignments of walls, doors, windows, etc.
    :param room_wdo_poly_list: The vectorized floor plan as a set of polygons.
    :param transformation: The 3-by-3 transformation representing translation, rotation and scale.
        NOTE(review): the ``np.ndarray`` annotation looks wrong — the object must
        expose ``apply_inverse`` (i.e. a transformations.Transformation2D); TODO fix.
    :param floorplan_image_file_name: The raster floor plan image.
    :param output_file_name: The output file name where the raster to vector alignment image will be stored.
    :return: None
    """
    png_floor_image = cv2.imread(floorplan_image_file_name)
    # Work in RGB internally; converted back to BGR right before saving.
    png_floor_image = cv2.cvtColor(png_floor_image, cv2.COLOR_BGR2RGB)
    for room_wdo_poly in room_wdo_poly_list:
        # Rooms are drawn in red; WDO elements keep their standard colors.
        if room_wdo_poly.type == PolygonType.ROOM:
            color = (255, 0, 0)  # Red
        else:
            color = POLYGON_COLOR[room_wdo_poly.type]
        room_wdo_vertices = room_wdo_poly.points
        # Map vector-floor-plan coordinates into the raster image frame.
        # NOTE(review): verify that apply_inverse does not mutate
        # room_wdo_poly.points in place — TODO confirm.
        png_floor_coordinates = transformation.apply_inverse(room_wdo_vertices)
        cv2.polylines(
            png_floor_image,
            [np.int_(png_floor_coordinates)],
            True,
            color,
            thickness=2,
            lineType=cv2.LINE_AA,
        )
    png_floor_image = cv2.cvtColor(png_floor_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(output_file_name, png_floor_image)
| 12,117
| 37.469841
| 111
|
py
|
zind
|
zind-main/code/utils.py
|
"""
This module contains some common routines and types used by other modules.
"""
import collections
from enum import Enum
from typing import List, NamedTuple, Tuple
import numpy as np
import shapely.geometry
# We use OpenCV's type as the underlying 2D image type.
Image = np.ndarray
CHECK_RIGHT_ANGLE_THRESH = 0.1
class Point2D(collections.namedtuple("Point2D", "x y")):
    """An immutable 2D point with named ``x``/``y`` field access."""

    @classmethod
    def from_tuple(cls, t: Tuple[float, float]) -> "Point2D":
        """Build a Point2D from an (x, y) pair.

        Bug fix: the annotation previously used ``np.float``, an alias removed
        in NumPy 1.24; since annotations are evaluated at definition time, that
        broke importing this module. The builtin ``float`` is the correct type.
        """
        return cls._make(t)
# The type of supported polygon/wall/point objects.
class PolygonType(Enum):
    """The type of supported polygon/wall/point objects in a ZInD floor plan."""
    ROOM = "room"
    WINDOW = "window"
    DOOR = "door"
    OPENING = "opening"
    PRIMARY_CAMERA = "primary_camera"
    SECONDARY_CAMERA = "secondary_camera"
    PIN_LABEL = "pin_label"
PolygonTypeMapping = {
"windows": PolygonType.WINDOW,
"doors": PolygonType.DOOR,
"openings": PolygonType.OPENING,
}
class Polygon(
    NamedTuple(
        "Polygon", [("type", PolygonType), ("points", List[Point2D]), ("name", str)]
    )
):
    """
    Polygon class that can be used to represent polygons/lines as a list of points, the type and (optional) name
    """

    __slots__ = ()

    def __new__(cls, type, points, name=""):
        # "name" is optional and defaults to the empty string.
        return super(Polygon, cls).__new__(cls, type, points, name)

    @staticmethod
    def list_to_points(points: List[Tuple[float, float]]):
        """Convert a list of raw (x, y) tuples into Point2D objects.

        Bug fix: the annotation previously used ``np.float``, an alias removed
        in NumPy 1.24, which made importing this module fail at definition time;
        the builtin ``float`` is the correct type.
        """
        return [Point2D._make(p) for p in points]

    @property
    def to_list(self):
        """Return the points as a plain list of (x, y) tuples."""
        return [(p.x, p.y) for p in self.points]

    @property
    def num_points(self):
        """Number of vertices in this polygon/line."""
        return len(self.points)

    @property
    def to_shapely_poly(self):
        # Use this function when converting a closed room shape polygon
        return shapely.geometry.polygon.Polygon(self.to_list)

    @property
    def to_shapely_line(self):
        # Use this function when converting W/D/O elements since those are represented as lines.
        return shapely.geometry.LineString(self.to_list)
| 1,941
| 24.552632
| 112
|
py
|
zind
|
zind-main/code/transformations.py
|
"""
This module provides utilities to handle the various coordinate system transformations:
1. Spherical to/from cartesian
2. 3D room layout to/from pano pixels
3. 3D room floor_plan_layouts to/from 2D top-down merged floor_plan_layouts
"""
import collections
import logging
import math
import sys
from typing import List, Dict, Any
import numpy as np
from utils import Point2D
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
class Transformation2D(
    collections.namedtuple("Transformation", "rotation_matrix scale translation")
):
    """
    Class to handle relative translation/rotation/scale of room shape coordinates
    to transform them from local to the global frame of reference.
    """

    @classmethod
    def from_translation_rotation_scale(
        cls, *, position: "Point2D", rotation: float, scale: float
    ):
        """
        Create a transformation object from the ZInD merged top-down geometry data
        based on the given 2D translation (position), rotation angle and scale.
        :param position: 2D translation (in the x-y plane)
        :param rotation: Rotation angle in degrees (in the x-y plane)
        :param scale: Scale factor for all the coordinates
        :return: A transformation object that can later be applied on a list of
        coordinates in local frame of reference to move them into the global
        (merged floor map) frame of reference.
        """
        translation = np.array([position.x, position.y]).reshape(1, 2)
        rotation_angle = np.radians(rotation)
        # 2D rotation matrix applied as row-vector @ R (note the sign layout).
        rotation_matrix = np.array(
            [
                [np.cos(rotation_angle), np.sin(rotation_angle)],
                [-np.sin(rotation_angle), np.cos(rotation_angle)],
            ]
        )
        return cls(
            rotation_matrix=rotation_matrix, scale=scale, translation=translation
        )

    @classmethod
    def from_zind_data(cls, zind_transformation: Dict[str, Any]):
        """
        Create a transformation object from the ZInD JSON blob.
        :param zind_transformation: Dict with "translation", "rotation" and "scale" fields.
        :return: A transformation object that can later be applied on a list of
        coordinates in local frame of reference to move them into the global
        (merged floor map) frame of reference.
        """
        return Transformation2D.from_translation_rotation_scale(
            position=Point2D.from_tuple(zind_transformation["translation"]),
            rotation=zind_transformation["rotation"],
            scale=zind_transformation["scale"],
        )

    def to_global(self, coordinates):
        """
        Apply transformation on a list of 2D points to transform them from local to global frame of reference.
        :param coordinates: Array of shape (N, 2) in the local frame of reference.
        :return: A new array with the transformed 2D coordinates (the input is not modified).
        """
        # dot() allocates a fresh array, so the caller's input stays untouched.
        coordinates = coordinates.dot(self.rotation_matrix) * self.scale
        coordinates = coordinates + self.translation
        return coordinates

    def apply_inverse(self, coordinates):
        """
        Apply the inverse transformation, mapping global coordinates back to the local frame.
        :param coordinates: Array of shape (N, 2) in the global frame of reference.
        :return: A new array with the local-frame 2D coordinates.

        Bug fix: this method used to subtract the translation in place (``-=``),
        silently mutating the caller's array; it now computes a fresh array.
        """
        coordinates = coordinates - self.translation
        return coordinates.dot(self.rotation_matrix.T) / self.scale
class TransformationSpherical:
    """
    Class to handle various spherical transformations.
    """
    EPS = np.deg2rad(1)  # Absolute precision when working with radians.
    def __init__(self):
        pass
    @classmethod
    def rotate(cls, input_array: np.ndarray):
        # NOTE(review): ROTATION_MATRIX is not defined anywhere in this class, so
        # calling rotate() raises AttributeError — presumably dead code or the
        # attribute is attached elsewhere; TODO confirm.
        return input_array.dot(cls.ROTATION_MATRIX)
    @staticmethod
    def normalize(points_cart: np.ndarray) -> np.ndarray:
        """
        Normalize a set of 3D vectors.
        :param points_cart: Non-empty array of shape (N, 3).
        :return: Array of shape (N, 3) where every row has unit length.
        """
        num_points = points_cart.shape[0]
        assert num_points > 0
        num_coords = points_cart.shape[1]
        assert num_coords == 3
        # Per-row Euclidean norm.
        rho = np.sqrt(np.sum(np.square(points_cart), axis=1))
        return points_cart / rho.reshape(num_points, 1)
    @staticmethod
    def cartesian_to_sphere(points_cart: np.ndarray) -> np.ndarray:
        """
        Convert cartesian to spherical coordinates.
        :param points_cart: Non-empty array of shape (N, 3) of (x, y, z) points.
        :return: Array of shape (N, 3) of (theta, phi, rho) triplets.
        """
        output_shape = (points_cart.shape[0], 3)  # type: ignore
        num_points = points_cart.shape[0]
        assert num_points > 0
        num_coords = points_cart.shape[1]
        assert num_coords == 3
        x_arr = points_cart[:, 0]
        y_arr = points_cart[:, 1]
        z_arr = points_cart[:, 2]
        # Azimuth angle is in [-pi, pi].
        # Note the x-axis flip to align the handedness of the pano and room shape coordinate systems.
        theta = np.arctan2(-x_arr, y_arr)
        # Radius can be anything between (0, inf)
        rho = np.sqrt(np.sum(np.square(points_cart), axis=1))
        phi = np.arcsin(z_arr / rho)  # Map elevation to [-pi/2, pi/2]
        return np.column_stack((theta, phi, rho)).reshape(output_shape)
    @classmethod
    def sphere_to_pixel(cls, points_sph: np.ndarray, width: int) -> np.ndarray:
        """
        Convert spherical coordinates to pixel coordinates inside a 360 pano image with a given width.
        :param points_sph: Array of (theta, phi) or (theta, phi, rho) rows.
        :param width: Pano image width in pixels (pano height is width / 2).
        :return: Array of shape (N, 2) of (x, y) pixel coordinates.
        """
        output_shape = (points_sph.shape[0], 2)  # type: ignore
        num_points = points_sph.shape[0]
        assert num_points > 0
        num_coords = points_sph.shape[1]
        assert num_coords == 2 or num_coords == 3
        height = width / 2
        assert width > 1 and height > 1
        # We only consider the azimuth and elevation angles.
        theta = points_sph[:, 0]
        assert np.all(np.greater_equal(theta, -math.pi - cls.EPS))
        assert np.all(np.less_equal(theta, math.pi + cls.EPS))
        phi = points_sph[:, 1]
        assert np.all(np.greater_equal(phi, -math.pi / 2.0 - cls.EPS))
        assert np.all(np.less_equal(phi, math.pi / 2.0 + cls.EPS))
        # Convert the azimuth to x-coordinates in the pano image, where
        # theta = 0 maps to the horizontal center.
        x_arr = theta + math.pi  # Map to [0, 2*pi]
        x_arr /= 2.0 * math.pi  # Map to [0, 1]
        x_arr *= width - 1  # Map to [0, width)
        # Convert the elevation to y-coordinates in the pano image, where
        # phi = 0 maps to the vertical center.
        y_arr = phi + math.pi / 2.0  # Map to [0, pi]
        y_arr /= math.pi  # Map to [0, 1]
        y_arr = 1.0 - y_arr  # Flip so that y goes up.
        y_arr *= height - 1  # Map to [0, height)
        return np.column_stack((x_arr, y_arr)).reshape(output_shape)
    @classmethod
    def cartesian_to_pixel(cls, points_cart: np.ndarray, width: int):
        """Convenience chain: cartesian -> spherical -> pixel coordinates."""
        return cls.sphere_to_pixel(cls.cartesian_to_sphere(points_cart), width)
class Transformation3D:
    """
    Class to handle transformation from the 2D top-down floor map coordinates to 3D cartesian coordinates
    """

    def __init__(self, ceiling_height: float, camera_height: float):
        """
        :param ceiling_height: The height of the ceiling above the floor.
        :param camera_height: The height of the camera above the floor (the
            camera is the origin of the z axis).
        """
        self._ceiling_height = ceiling_height
        self._camera_height = camera_height

    def to_3d(self, room_vertices: np.ndarray):
        """
        Transform 2D room vertices to 3D cartesian points.
        :param room_vertices: The top-down 2D projected vertices as an (N, 2)
            array (any array-like of (x, y) pairs is accepted).
        :return: Both the floor as well as the ceiling vertices in 3D cartesian
            coordinates, each as an (N, 3) array.

        The floor sits at z = -camera_height and the ceiling at
        z = ceiling_height - camera_height (camera-centric z origin).
        """
        # Fix/generalization: the old annotation promised List[Point2D] but the
        # code required an ndarray (.shape); accept any array-like instead.
        room_vertices = np.asarray(room_vertices)
        num_vertices = room_vertices.shape[0]
        # Constant z columns for the floor and ceiling rings.
        floor_z = np.repeat([-self._camera_height], num_vertices).reshape(
            num_vertices, 1
        )
        ceiling_z = np.repeat(
            [self._ceiling_height - self._camera_height], num_vertices
        ).reshape(num_vertices, 1)
        # Create floor and ceiling coordinates by appending the z column.
        floor_coordinates = np.hstack((room_vertices, floor_z))
        ceiling_coordinates = np.hstack((room_vertices, ceiling_z))
        return floor_coordinates, ceiling_coordinates
| 7,990
| 33.895197
| 110
|
py
|
zind
|
zind-main/code/pano_image.py
|
"""PanoImage module provides utilities to represent and work with 360 images (in Equirectangular projection).
Typical usage example:
pano_image = PanoImage(image_file_path) # Load a 360 panorama.
pano_image.draw_XXX(...) # Draw various elements like markers and lines.
pano_image_cv = pano_image.opencv_image # Access the underlying OpenCV representation (in a mutable way).
"""
import logging
import sys
from typing import Tuple
import cv2
import numpy as np
from transformations import TransformationSpherical
from utils import Image
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
class PanoImageException(Exception):
    """Custom exception that represents a failure to load a valid 360 panorama."""

    def __init__(self, message):
        # Prefix the underlying cause with a uniform ZInD banner.
        super().__init__(f"ZInD failed! Error: {message}")
class PanoImage:
    """Load, parse and represent 360 panorama images in Equirectangular projection.
    The class also implements a few drawing utilities directly in the Equirectangular projection.
    """
    def __init__(self, image_cv: Image) -> None:
        """Initialize 360 panorama from a given OpenCV RGB image.
        :param image_cv: uint8 OpenCV image.
        :raises PanoImageException: If the image is not a valid uint8 2:1 pano.
        """
        self._image = image_cv
        self._validate_image()
    @classmethod
    def from_file(cls, image_file_path: str) -> "PanoImage":
        """Initialize 360 panorama from a given RGB image file
        :param image_file_path: The path to the 360 panorama image.
        :return: The corresponding PanoImage object.
        :raises PanoImageException: If the image can not be loaded.
        """
        image_cv = cv2.imread(image_file_path, cv2.IMREAD_COLOR)
        # This can happen because of missing file, improper permissions, unsupported or invalid format, etc.
        if image_cv is None:
            raise PanoImageException("Can not load image: {}".format(image_file_path))
        # Convert the underlying data structure from uint8 BGR to uint8 RGB.
        image_cv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)
        return cls(image_cv)
    def write_to_file(self, file_path: str) -> None:
        """Save the pano to disk, converting back to OpenCV's BGR channel order."""
        # Convert the underlying image from RGB floating point to bgr uint8.
        image_bgr_cv = cv2.cvtColor(self.opencv_image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(file_path, image_bgr_cv)
    def _validate_image(self):
        """Verify whether the underlying image represents a valid 360 panorama.
        This method will throw an instance of PanoImageException if validation fails.
        """
        if self._image is None:
            raise PanoImageException("Empty image")
        if not np.issubdtype(self._image.dtype, np.uint8):
            raise PanoImageException("Expecting uint8 image: %s" % self._image.dtype)
        if not self._has_valid_fov:
            raise PanoImageException(
                "Invalid pano dimensions: %d-by-%d" % (self.width, self.height)
            )
    @property
    def _has_valid_fov(self) -> bool:
        """Return true if the pano image dimensions could represent a valid full FoV pano, i.e. 2:1."""
        return self.width == 2 * self.height
    @property
    def width(self) -> int:
        """Return the width of the pano."""
        return self._image.shape[1]
    @property
    def height(self) -> int:
        """Return the height of the pano."""
        return self._image.shape[0]
    @property
    def opencv_image(self) -> Image:
        """Return a shallow copy of the underlying OpenCV image data.
        Note: this is a mutable object and should be treated with care!
        """
        return self._image
    def draw_marker(
        self,
        point_pix: np.ndarray,
        *,
        color: Tuple[int, int, int] = (255, 255, 0),
        marker_size: int = 10,
        thickness: int = 2,
    ):
        """Draw a tilted cross marker (on the underlying pano) given its center in pixel coordinates.
        :param point_pix: The center of the point (in image coordinates).
        :param color: The RGB color of the marker.
        :param marker_size: The size of the marker.
        :param thickness: The thickness of the marker.
        """
        cv2.drawMarker(
            self.opencv_image,
            tuple(np.int_(point_pix)),
            color,
            cv2.MARKER_TILTED_CROSS,
            markerSize=marker_size,
            thickness=thickness,
        )
    def draw_spherical_line(
        self,
        point_start_cart: np.ndarray,
        point_end_cart: np.ndarray,
        color: Tuple[int, int, int] = (255, 255, 255),
        thickness: int = 2,
        thresh_deg: float = 0.5,
    ):
        """Draw a spherical line corresponding to the shorter arc, by properly handling loop-closure crossing lines.
        :param point_start_cart: The start point (in cartesian coordinates).
        :param point_end_cart: The end point (in cartesian coordinates).
        :param color: The RGB color of the line.
        :param thickness: The thickness of the line.
        :param thresh_deg: The angular resolution for approximating spherical line as a set of polylines.
        """
        pt1 = point_start_cart.reshape(1, 3)
        pt2 = point_end_cart.reshape(1, 3)
        thresh_rad = np.deg2rad(thresh_deg)
        pt1_pix = TransformationSpherical.cartesian_to_pixel(pt1, self.width)
        points_stack = [[pt1_pix[0, 0], pt1_pix[0, 1]]]
        lines_stack = [(pt1, pt2)]
        # Adaptive subdivision: pop a segment and either emit its projected end
        # point (when the arc is below thresh_rad) or split it at the normalized
        # midpoint, recursing via the explicit stack.
        while lines_stack:
            line_curr = lines_stack.pop()
            pt1 = line_curr[0]
            pt2 = line_curr[1]
            angle_curr = np.arccos(np.clip(np.dot(pt1, pt2.T), -1, 1))
            if angle_curr < thresh_rad:
                pt2_pix = TransformationSpherical.cartesian_to_pixel(pt2, self.width)
                points_stack.append([pt2_pix[0, 0], pt2_pix[0, 1]])
            else:
                mid_pt = 0.5 * (pt1 + pt2)
                mid_pt /= np.linalg.norm(mid_pt)
                # Push (pt1, mid) last so samples are emitted in order.
                lines_stack.append((mid_pt, pt2))
                lines_stack.append((pt1, mid_pt))
        # In case of a loop closure line, we split it into two poly lines.
        if self._is_loop_closure_line(self.width, point_start_cart, point_end_cart):
            idx_cut = -1
            # Find where consecutive projected points jump across the pano border.
            for idx, pt_curr in enumerate(points_stack[:-1]):
                pt_next = points_stack[idx + 1]
                if abs(pt_curr[0] - pt_next[0]) > self.width / 2:
                    idx_cut = idx
            assert 0 <= idx_cut < len(points_stack)
            points_left = np.int32([points_stack[0 : idx_cut + 1]])
            cv2.polylines(self.opencv_image, points_left, False, color, thickness)
            # NOTE(review): the [idx_cut + 1 : -1] slice drops the final sample of
            # the right-hand polyline — TODO confirm this is intentional.
            points_right = np.int32([points_stack[idx_cut + 1 : -1]])
            cv2.polylines(self.opencv_image, points_right, False, color, thickness)
        else:
            cv2.polylines(
                self.opencv_image, np.int32([points_stack]), False, color, thickness
            )
    def draw_dotted_line(
        self,
        point_start_cart,
        point_end_cart,
        *,
        color: Tuple[int, int, int] = (255, 255, 255),
        thickness: int = 1,
        thresh_deg: float = 2.0,
    ):
        """Draw a dotted spherical line given the two end points in cartesian coordinates.
        :param point_start_cart: The start point.
        :param point_end_cart: The end point.
        :param color: The RGB color of the line.
        :param thickness: The thickness of the line.
        :param thresh_deg: The distance between the dotted points (in degrees).
        """
        # Controls the angular space between the dots.
        thresh_rad = np.deg2rad(thresh_deg)
        pt1 = np.asarray(point_start_cart).reshape(1, 3)
        pt2 = np.asarray(point_end_cart).reshape(1, 3)
        pt1_pix = TransformationSpherical.cartesian_to_pixel(pt1, self.width)
        points_stack = [[pt1_pix[0, 0], pt1_pix[0, 1]]]
        lines_stack = [(pt1, pt2)]
        # Same adaptive subdivision as draw_spherical_line, but a filled dot is
        # drawn at each split midpoint instead of connecting samples with lines.
        while lines_stack:
            line_curr = lines_stack.pop()
            pt1 = line_curr[0]
            pt2 = line_curr[1]
            angle_curr = np.arccos(np.clip(np.dot(pt1, pt2.T), -1, 1))
            if angle_curr < thresh_rad:
                pt2_pix = TransformationSpherical.cartesian_to_pixel(pt2, self.width)
                points_stack.append([pt2_pix[0, 0], pt2_pix[0, 1]])
            else:
                mid_pt = 0.5 * (pt1 + pt2)
                mid_pt /= np.linalg.norm(mid_pt)
                lines_stack.append((mid_pt, pt2))
                lines_stack.append((pt1, mid_pt))
                # Transform to pixel coordinates.
                mid_pt_pix = TransformationSpherical.cartesian_to_pixel(
                    mid_pt, self.width
                )
                cv2.circle(
                    self.opencv_image,
                    tuple(np.int_(mid_pt_pix[0])),
                    thickness,
                    color,
                    -1,
                )
    def draw_vertical_line(
        self,
        points: Tuple[np.ndarray, np.ndarray],
        *,
        color: Tuple[int, int, int] = (255, 255, 255),
        thickness: int = 2,
    ):
        """Draw a vertical line given the two points in cartesian coordinates.
        :param points: Pair of 3D cartesian end points.
        :param color: The RGB color of the line.
        :param thickness: The thickness of the line.
        """
        pt1_pix, pt2_pix = self._get_vertical_line_image_coordinates(points)
        cv2.line(
            self.opencv_image,
            (pt1_pix[0], pt1_pix[1]),
            (pt2_pix[0], pt2_pix[1]),
            color,
            thickness=thickness,
        )
    def _get_vertical_line_image_coordinates(
        self, points: Tuple[np.ndarray, np.ndarray]
    ):
        """Get the endpoints of a vertical line, in img coords, running from 3d points[0] to points[1]"""
        pt1_pix = TransformationSpherical.cartesian_to_pixel(
            points[0].reshape(1, 3), self.width
        )
        pt1_pix = np.int_(np.squeeze(pt1_pix))
        pt2_pix = TransformationSpherical.cartesian_to_pixel(
            points[1].reshape(1, 3), self.width
        )
        pt2_pix = np.int_(np.squeeze(pt2_pix))
        return pt1_pix, pt2_pix
    def _is_loop_closure_line(self, width: int, pt1: np.ndarray, pt2: np.ndarray):
        """Check if a given line is a "loop closure line", meaning that it's rendering on the pano texture would
        wrap around the left/right border.
        """
        pt1 = pt1.reshape(1, 3)
        pt2 = pt2.reshape(1, 3)
        pt1_pix = TransformationSpherical.cartesian_to_pixel(pt1, width)
        pt2_pix = TransformationSpherical.cartesian_to_pixel(pt2, width)
        mid_pt = 0.5 * (pt1 + pt2)
        mid_pt /= np.linalg.norm(mid_pt)
        mid_pt_pix = TransformationSpherical.cartesian_to_pixel(mid_pt, width)
        dist_total = abs(pt1_pix[0, 0] - pt2_pix[0, 0])
        dist_left = abs(pt1_pix[0, 0] - mid_pt_pix[0, 0])
        dist_right = abs(pt2_pix[0, 0] - mid_pt_pix[0, 0])
        # The segment wraps if its endpoints project more than half the pano
        # apart, or if the midpoint does not project between them (with 1px slack).
        return dist_total > width / 2.0 or dist_left + dist_right > dist_total + 1
| 10,955
| 35.398671
| 116
|
py
|
zind
|
zind-main/code/floor_plan.py
|
"""FloorPlan module provides utilities to represent and work with ZInD floor plans.
Typical usage example:
zfp = FloorPlan(input_json_file) # Load and parse ZInD JSON file as a Zillow FloorPlan object.
top_down_layouts = zfp.floor_plan_layouts["raw"] # Retrieve the 2D merged top-down floor plan layouts and WDO.
panos_layouts = zfp.panos_layouts["raw"]["primary"] # Retrieve the 3D per-pano floor plan layouts and WDO.
"""
import json
import logging
import sys
from pathlib import Path
from typing import Dict, Any, List
import numpy as np
from transformations import Transformation2D
from utils import Polygon, PolygonType, PolygonTypeMapping
RAW_GEOMETRY_KEY = "raw"
COMPLETE_GEOMETRY_KEY = "complete"
VISIBLE_GEOMETRY_KEY = "visible"
REDRAW_GEOMETRY_KEY = "redraw"
PRIMARY_PANO_KEY = "primary"
SECONDARY_PANO_KEY = "secondary"
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
class FloorPlanException(Exception):
    """Exception class for ZInD floor plan data parsing and validation."""

    def __init__(self, message):
        # Prefix the underlying cause with a uniform ZInD banner.
        super().__init__(f"ZInD failed! Error: {message}")
class FloorPlan:
    """Load, parse and retrieve ZInD floor plan data.
    A class that handles the custom ZInD floor plan type as a collection of polygon room shapes, wall objects like
    windows/doors/openings (WDO for short) and their location in the global merged floor plan coordinate system.
    """
    def __init__(self, json_file_name):
        """The init method will create a floor plan object from the custom ZInD JSON file."""
        with open(json_file_name) as json_file:
            floor_map_json = json.load(json_file)
        # We will split the parsed 3D per-pano layouts based on the different layouts and pano types.
        self._panos_layouts = {
            RAW_GEOMETRY_KEY: {PRIMARY_PANO_KEY: [], SECONDARY_PANO_KEY: []},
            COMPLETE_GEOMETRY_KEY: {PRIMARY_PANO_KEY: [], SECONDARY_PANO_KEY: []},
            VISIBLE_GEOMETRY_KEY: {PRIMARY_PANO_KEY: [], SECONDARY_PANO_KEY: []},
        }
        # Source file name is also reused as the "name" tag on some polygons below.
        self._json_file_name = json_file_name
        self._input_folder = Path(json_file_name).resolve().parent
        # We will split the parsed 2D floor plan data based on the different layouts, e.g. raw, complete or redraw.
        self._floor_plan_layouts = {
            RAW_GEOMETRY_KEY: {},
            COMPLETE_GEOMETRY_KEY: {},
            REDRAW_GEOMETRY_KEY: {},
        }
        # Collect floor plan redraw alignment data.
        # NOTE(review): the two *_align dicts below are initialized but never
        # populated anywhere in this class — confirm whether external code
        # writes into them or they are dead state.
        self._floor_plan_redraw_align = {}
        self._floor_plan_complete_align = {}
        self._floor_plan_to_redraw_transformation = {}
        self._floor_plan_image_path = {}
        # Make sure we have the required top-level fields
        assert "merger" in floor_map_json
        assert "redraw" in floor_map_json
        assert "floorplan_to_redraw_transformation" in floor_map_json
        merger_data = floor_map_json["merger"]
        redraw_data = floor_map_json["redraw"]
        floor_plan_redraw_alignment_data = floor_map_json[
            "floorplan_to_redraw_transformation"
        ]
        # Parse the merger data: raw, complete, and visible layouts + WDO as well as the draft floor plan.
        self.parse_merger(merger_data)
        # Parse the redraw data: this is the final cleanup floor-plan.
        self.parse_redraw(redraw_data)
        # Parse the transformations that map between the final floor plan image (raster) and the redraw data (vector).
        self.parse_floor_plan_redraw_alignment(floor_plan_redraw_alignment_data)
    @property
    def input_folder(self):
        """Folder containing the input JSON file (absolute path)."""
        return self._input_folder
    @property
    def panos_layouts(self):
        """Per-pano 3D layouts, keyed by geometry type then primary/secondary."""
        return self._panos_layouts
    @property
    def floor_plan_layouts(self):
        """2D merged floor plan polygons, keyed by geometry type then floor id."""
        return self._floor_plan_layouts
    @property
    def floor_plan_to_redraw_transformation(self):
        """Per-floor transformation from the raster floor plan image to redraw geometry."""
        return self._floor_plan_to_redraw_transformation
    @property
    def floor_plan_image_path(self):
        """Per-floor path to the final rendered floor plan image."""
        return self._floor_plan_image_path
    def parse_redraw(self, redraw_data: Dict[str, Any]):
        """Parse the final redraw geometry.
        The function will modify self._floor_plan_layouts
        :param redraw_data: The parsed ZInD redraw data.
        :return: None
        """
        for floor_id, floor_data in redraw_data.items():
            self._floor_plan_layouts[REDRAW_GEOMETRY_KEY][floor_id] = []
            # NOTE(review): redraw_vertices_list and redraw_room_wdo_poly_list
            # are accumulated below but never stored or returned — confirm
            # whether they are leftovers from a removed feature.
            redraw_vertices_list = []
            redraw_room_wdo_poly_list = []
            for room_id, room_data in floor_data.items():
                # Redraw vertices are already in the global coordinate system.
                room_vertices_global = room_data["vertices"]
                redraw_room_wdo_poly_list.append(
                    Polygon(
                        type=PolygonType.ROOM,
                        name=self._json_file_name,
                        points=Polygon.list_to_points(np.array(room_vertices_global)),
                    )
                )
                redraw_vertices_list.extend(room_vertices_global)
                zind_points_list = Polygon.list_to_points(room_vertices_global)
                zind_poly = Polygon(type=PolygonType.ROOM, points=zind_points_list)
                # Validate the room polygon: may throw FloorPlanException.
                self.validate_room_polygon(zind_poly)
                self._floor_plan_layouts[REDRAW_GEOMETRY_KEY][floor_id].append(
                    zind_poly
                )
                # Parse windows/doors, note that the redraw geometry does not contain openings.
                for wdo_type in ["windows", "doors"]:
                    for wdo_data in room_data[wdo_type]:
                        redraw_room_wdo_poly_list.append(
                            Polygon(
                                type=PolygonTypeMapping[wdo_type],
                                points=Polygon.list_to_points(np.array(wdo_data)),
                            )
                        )
                        wdo_poly = Polygon(
                            type=PolygonTypeMapping[wdo_type],
                            points=Polygon.list_to_points(wdo_data),
                        )
                        self._floor_plan_layouts[REDRAW_GEOMETRY_KEY][floor_id].append(
                            wdo_poly
                        )
                # Collect pin labels (if any).
                for pin_data in room_data["pins"]:
                    # ndmin=2 so a single (x, y) pin becomes a 1x2 point array.
                    pin_label_position = np.array(pin_data["position"], ndmin=2)
                    pin_label_poly = Polygon(
                        type=PolygonType.PIN_LABEL,
                        name=pin_data["label"],
                        points=Polygon.list_to_points(pin_label_position),
                    )
                    self._floor_plan_layouts[REDRAW_GEOMETRY_KEY][floor_id].append(
                        pin_label_poly
                    )
    def _parse_merger_wdo(
        self,
        wdo_vertices_local: List[float],
        *,
        wdo_type: str,
        transformation: np.ndarray,
        zind_poly: Polygon = None,
    ):
        """Parse the WDO portion of the layout field.
        :param wdo_vertices_local: The WDO positions in the local coordinate system.
        :param wdo_type: The type, e.g. window, door or opening
        :param transformation: The local to global coordinate system transformation.
        :param zind_poly: ZInD room shape polygon object.
        :return: The parsed WDO elements as a list of polygons.
        """
        # Skip if there are no elements of this type.
        # NOTE(review): this bare return yields None, not an empty list; the
        # caller in parse_merger guards with its own emptiness check before
        # calling extend(), so None never propagates — but returning [] here
        # would be safer for future callers.
        if len(wdo_vertices_local) == 0:
            return
        # Each WDO element is stored as 3 local vertices; only the first two
        # (the left/right boundary points) are kept here.
        num_wdo = len(wdo_vertices_local) // 3
        wdo_left_right_bound = []
        for wdo_idx in range(num_wdo):
            wdo_left_right_bound.extend(
                wdo_vertices_local[wdo_idx * 3 : wdo_idx * 3 + 2]
            )
        wdo_vertices_global = transformation.to_global(np.array(wdo_left_right_bound))
        wdo_poly_list = []
        # Every two points in the list define windows/doors/openings by left and right boundaries, so
        # for N elements we will have 2 * N pair of points, thus we iterate on every successive pair
        for wdo_points in zip(wdo_vertices_global[::2], wdo_vertices_global[1::2]):
            zind_points_list = Polygon.list_to_points(wdo_points)
            zind_poly_type = PolygonTypeMapping[wdo_type]
            wdo_poly = Polygon(
                type=zind_poly_type, name=self._json_file_name, points=zind_points_list
            )
            # Add the WDO element to the list of polygons/lines.
            wdo_poly_list.append(wdo_poly)
            # Validate the WDO element: may throw FloorPlanException
            self.validate_wdo_polygon(wdo_poly, zind_poly=zind_poly)
        return wdo_poly_list
    def parse_merger(self, merger_data: Dict[str, Any]):
        """Parse the merger data field.
        This includes the following information for every floor:
        1. The tree-like structure of complete, partial rooms and raw layout annotations.
        1. The raw, complete and visible 3D layouts for each pano alongside the WDO elements.
        2. The 2D transformations to build the draft merger floor plan from the individual layout pieces.
        The function will modify (1) self._panos_layouts and (2) self._floor_plan_layouts
        :param merger_data: The merger data field from the ZInD JSON.
        :return: None
        """
        # Top level merger data is per-floor.
        for floor_id, floor_data in merger_data.items():
            # Create a list of all the ZInD polygons for this floor: rooms, windows, doors, openings.
            raw_zind_poly_list = []
            complete_zind_poly_list = []
            for complete_room_id, complete_room_data in floor_data.items():
                # All panos of a complete room share one complete-layout
                # annotation; only collect it once per complete room.
                complete_geometry_has_collected = False
                for partial_room_id, partial_room_data in complete_room_data.items():
                    for pano_id, pano_data in partial_room_data.items():
                        # Create a transformation object that will move points from local to global coordinates.
                        transformation = Transformation2D.from_zind_data(
                            pano_data["floor_plan_transformation"]
                        )
                        for geometry_type in [
                            "layout_raw",
                            "layout_complete",
                            "layout_visible",
                        ]:
                            if geometry_type not in pano_data:
                                # Note that it is expected behavior to have missing floor plan layouts for some rooms,
                                # e.g. small closet that often times have outside the room annotations.
                                LOG.debug(
                                    "Missing layout {}: {}/{}/{}/{}".format(
                                        geometry_type,
                                        floor_id,
                                        complete_room_id,
                                        partial_room_id,
                                        pano_id,
                                    )
                                )
                                continue
                            zind_poly_list = []
                            # Transform and validate the room shape polygon.
                            room_vertices_local = np.asarray(
                                pano_data[geometry_type]["vertices"]
                            )
                            room_vertices_global = transformation.to_global(
                                room_vertices_local
                            )
                            zind_points_list = Polygon.list_to_points(
                                room_vertices_global
                            )
                            zind_poly = Polygon(
                                type=PolygonType.ROOM, points=zind_points_list
                            )
                            self.validate_room_polygon(zind_poly)
                            zind_poly_list.append(zind_poly)
                            # For complete geometry, we need to visualize the internal vertices.
                            internal_vertices_local_list = []
                            if geometry_type == "layout_complete":
                                for internal_vertices in pano_data[geometry_type][
                                    "internal"
                                ]:
                                    # Validate internal vertices.
                                    internal_vertices_local = np.asarray(
                                        internal_vertices
                                    )
                                    internal_vertices_local_list.append(
                                        internal_vertices_local
                                    )
                                    internal_vertices_global = transformation.to_global(
                                        internal_vertices_local
                                    )
                                    internal_poly = Polygon(
                                        type=PolygonType.ROOM,
                                        points=Polygon.list_to_points(
                                            internal_vertices_global
                                        ),
                                    )
                                    self.validate_room_polygon(internal_poly)
                                    zind_poly_list.append(internal_poly)
                            image_path = pano_data["image_path"]
                            # Collect pano data that we will use later to verify rendering on the pano textures.
                            pano_data_for_render = {}
                            pano_data_for_render["pano_id"] = "_".join(
                                [floor_id, complete_room_id, partial_room_id, pano_id]
                            )
                            pano_data_for_render["room_vertices"] = room_vertices_local
                            pano_data_for_render[
                                "internal_vertices"
                            ] = internal_vertices_local_list
                            pano_data_for_render["image"] = image_path
                            pano_data_for_render["camera_height"] = pano_data[
                                "camera_height"
                            ]
                            pano_data_for_render["ceiling_height"] = pano_data[
                                "ceiling_height"
                            ]
                            # Add the camera center to the list of elements to render later.
                            # Note that the local 2D camera center coordinate is always at (0, 0).
                            camera_center_global = transformation.to_global(
                                np.asarray([[0, 0]])
                            )
                            camera_type = (
                                PolygonType.PRIMARY_CAMERA
                                if pano_data["is_primary"]
                                else PolygonType.SECONDARY_CAMERA
                            )
                            camera_center_poly = Polygon(
                                type=camera_type,
                                points=Polygon.list_to_points(camera_center_global),
                            )
                            wdo_poly_list = []
                            # Parse the WDO elements (if any).
                            for wdo_type in ["windows", "doors", "openings"]:
                                wdo_vertices_local = np.asarray(
                                    pano_data[geometry_type][wdo_type]
                                )
                                pano_data_for_render[wdo_type] = wdo_vertices_local
                                # Skip if there are no elements of this type.
                                # (This guard also keeps _parse_merger_wdo's
                                # None-on-empty return from reaching extend.)
                                if len(wdo_vertices_local) == 0:
                                    continue
                                # Parse the current list of WDO elements and add it to the global list.
                                wdo_poly_list.extend(
                                    self._parse_merger_wdo(
                                        wdo_vertices_local,
                                        wdo_type=wdo_type,
                                        transformation=transformation,
                                        zind_poly=zind_poly,
                                    )
                                )
                            if pano_data["is_primary"]:
                                pano_key = PRIMARY_PANO_KEY
                            else:
                                pano_key = SECONDARY_PANO_KEY
                            # Add the current room shape, camera and wdo polygons to the list of all polygons.
                            if geometry_type == "layout_raw":
                                self._panos_layouts[RAW_GEOMETRY_KEY][pano_key].append(
                                    pano_data_for_render
                                )
                                raw_zind_poly_list.append(camera_center_poly)
                                # Raw 2D floor plan keeps room/WDO shapes only
                                # from primary panos.
                                if pano_data["is_primary"]:
                                    raw_zind_poly_list.append(zind_poly)
                                    raw_zind_poly_list.extend(wdo_poly_list)
                            elif geometry_type == "layout_complete":
                                self._panos_layouts[COMPLETE_GEOMETRY_KEY][
                                    pano_key
                                ].append(pano_data_for_render)
                                complete_zind_poly_list.append(camera_center_poly)
                                if not complete_geometry_has_collected:
                                    complete_zind_poly_list.extend(zind_poly_list)
                                    complete_zind_poly_list.extend(wdo_poly_list)
                                    complete_geometry_has_collected = True
                            elif geometry_type == "layout_visible":
                                self._panos_layouts[VISIBLE_GEOMETRY_KEY][
                                    pano_key
                                ].append(pano_data_for_render)
                                # Note that we do not visualize floor plan for visible geometry
                            else:
                                raise Exception(
                                    "Invalid geometry_type: {}".format(geometry_type)
                                )
            self._floor_plan_layouts[RAW_GEOMETRY_KEY][floor_id] = raw_zind_poly_list
            self._floor_plan_layouts[COMPLETE_GEOMETRY_KEY][
                floor_id
            ] = complete_zind_poly_list
    def parse_floor_plan_redraw_alignment(
        self, floor_plan_redraw_alignment_data: Dict[str, Any]
    ):
        """Parse the alignment between the raster floor plan image and the final redraw geometry.
        The function will modify self._floor_plan_to_redraw_transformation
        :param floor_plan_redraw_alignment_data: The parsed ZInD floor plan to redraw alignment data.
        :return: None
        """
        for floor_id, floor_data in floor_plan_redraw_alignment_data.items():
            transformation = Transformation2D.from_zind_data(floor_data)
            self._floor_plan_to_redraw_transformation[floor_id] = transformation
            self._floor_plan_image_path[floor_id] = floor_data["image_path"]
    def validate_room_polygon(self, zind_poly: Polygon):
        """Validate room polygon vertices:
        (1) Each room polygon must have 3 points or above.
        (2) No self intersections.
        :param zind_poly: ZInD polygon object.
        Throws a FloorPlanException if data can not be validated
        """
        if zind_poly.num_points < 3:
            raise FloorPlanException(
                "Invalid room polygon (insufficient number of corners): {}".format(
                    zind_poly
                )
            )
        # Shapely's is_valid flags self-intersecting rings among other defects.
        shapely_poly = zind_poly.to_shapely_poly
        if not shapely_poly.is_valid:
            raise FloorPlanException(
                "Invalid polygon (self-intersecting): {}".format(zind_poly)
            )
    def validate_wdo_polygon(
        self,
        wdo_poly: Polygon,
        *,
        zind_poly: Polygon = None,
        dist_threshold: float = 1.0,
    ):
        """Validate WDO element vertices against the corresponding ZInD room shape polygon:
        (1) Each WDO element must be defined by exactly two points (left/right boundaries)
        (2) Each WDO element must intersect the room shape geometry
        (3) TODO: verify that each WDO element lies on a single polygon line
        :param wdo_poly: WDO polygon object.
        :param zind_poly: ZInD polygon object (if None then no validation against room shape will be performed).
        :param dist_threshold: Threshold that will be used to check if two geometries intersect (using shapely).
        Throws a FloorPlanException if data can not be validated
        """
        if wdo_poly.num_points != 2:
            raise FloorPlanException(
                "Invalid WDO number of corners: {}".format(wdo_poly)
            )
        wdo_poly_shapely = wdo_poly.to_shapely_line
        if zind_poly is not None:
            zind_poly_shapely = zind_poly.to_shapely_poly
            # "Intersects" here means the line is within dist_threshold of the
            # room polygon, not a strict geometric intersection.
            dist = wdo_poly_shapely.distance(zind_poly_shapely)
            is_intersected = dist < dist_threshold
            if not is_intersected:
                raise FloorPlanException(
                    "Invalid WDO room shape intersection: {} {}".format(wdo_poly, dist)
                )
| 21,972
| 43.842857
| 118
|
py
|
LasagneNLP
|
LasagneNLP-master/bi_lstm.py
|
__author__ = 'max'
import time
import sys
import argparse
from lasagne_nlp.utils import utils
import lasagne_nlp.utils.data_processor as data_processor
import theano.tensor as T
import theano
import lasagne
from lasagne_nlp.networks.networks import build_BiLSTM
import lasagne.nonlinearities as nonlinearities
def main():
    """Train and evaluate a bi-directional LSTM sequence labeler.

    Parses command-line arguments, loads train/dev/test data, builds the
    BiLSTM + softmax network with theano/lasagne, then runs a training loop
    with dev-based early stopping and reports best test performance.

    NOTE: this file uses Python 2 print statements and Python 2 integer
    division semantics throughout.
    """
    parser = argparse.ArgumentParser(description='Tuning with bi-directional LSTM')
    parser.add_argument('--fine_tune', action='store_true', help='Fine tune the word embeddings')
    parser.add_argument('--embedding', choices=['word2vec', 'glove', 'senna'], help='Embedding for words',
                        required=True)
    parser.add_argument('--embedding_dict', default='data/word2vec/GoogleNews-vectors-negative300.bin',
                        help='path for embedding dict')
    parser.add_argument('--batch_size', type=int, default=10, help='Number of sentences in each batch')
    parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in LSTM')
    parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
    parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
    parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
    parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
    parser.add_argument('--peepholes', action='store_true', help='Peepholes for LSTM')
    parser.add_argument('--oov', choices=['random', 'embedding'], help='Embedding for oov word', required=True)
    parser.add_argument('--update', choices=['sgd', 'momentum', 'nesterov'], help='update algorithm', default='sgd')
    parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training',
                        required=True)
    parser.add_argument('--dropout', action='store_true', help='Apply dropout layers')
    parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
    parser.add_argument('--train')  # "data/POS-penn/wsj/split1/wsj1.train.original"
    parser.add_argument('--dev')  # "data/POS-penn/wsj/split1/wsj1.dev.original"
    parser.add_argument('--test')  # "data/POS-penn/wsj/split1/wsj1.test.original"
    args = parser.parse_args()
    def construct_input_layer():
        # When fine-tuning, inputs are word indices fed through a trainable
        # embedding layer; otherwise inputs are precomputed embedding vectors.
        if fine_tune:
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length), input_var=input_var, name='input')
            layer_embedding = lasagne.layers.EmbeddingLayer(layer_input, input_size=alphabet_size,
                                                            output_size=embedd_dim,
                                                            W=embedd_table, name='embedding')
            return layer_embedding
        else:
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length, embedd_dim), input_var=input_var,
                                                    name='input')
            return layer_input
    logger = utils.get_logger("BiLSTM")
    fine_tune = args.fine_tune
    oov = args.oov
    regular = args.regular
    embedding = args.embedding
    embedding_path = args.embedding_dict
    train_path = args.train
    dev_path = args.dev
    test_path = args.test
    update_algo = args.update
    grad_clipping = args.grad_clipping
    peepholes = args.peepholes
    gamma = args.gamma
    output_predict = args.output_prediction
    dropout = args.dropout
    X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
    embedd_table, label_alphabet, _, _, _, _ = data_processor.load_dataset_sequence_labeling(train_path, dev_path,
                                                                                             test_path, oov=oov,
                                                                                             fine_tune=fine_tune,
                                                                                             embedding=embedding,
                                                                                             embedding_path=embedding_path)
    num_labels = label_alphabet.size() - 1
    logger.info("constructing network...")
    # create variables
    target_var = T.imatrix(name='targets')
    mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
    if fine_tune:
        input_var = T.imatrix(name='inputs')
        num_data, max_length = X_train.shape
        alphabet_size, embedd_dim = embedd_table.shape
    else:
        input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
        num_data, max_length, embedd_dim = X_train.shape
    # construct input and mask layers
    layer_incoming = construct_input_layer()
    layer_mask = lasagne.layers.InputLayer(shape=(None, max_length), input_var=mask_var, name='mask')
    # construct bi-lstm
    num_units = args.num_units
    bi_lstm = build_BiLSTM(layer_incoming, num_units, mask=layer_mask, grad_clipping=grad_clipping,
                           peepholes=peepholes, dropout=dropout)
    # reshape bi-rnn to [batch * max_length, num_units]
    bi_lstm = lasagne.layers.reshape(bi_lstm, (-1, [2]))
    # construct output layer (dense layer with softmax)
    layer_output = lasagne.layers.DenseLayer(bi_lstm, num_units=num_labels, nonlinearity=nonlinearities.softmax,
                                             name='softmax')
    # get output of bi-rnn shape=[batch * max_length, #label]
    prediction_train = lasagne.layers.get_output(layer_output)
    prediction_eval = lasagne.layers.get_output(layer_output, deterministic=True)
    final_prediction = T.argmax(prediction_eval, axis=1)
    # flat target_var to vector
    target_var_flatten = target_var.flatten()
    # flat mask_var to vector
    mask_var_flatten = mask_var.flatten()
    # compute loss
    num_loss = mask_var_flatten.sum(dtype=theano.config.floatX)
    # for training, we use mean of loss over number of labels
    loss_train = lasagne.objectives.categorical_crossentropy(prediction_train, target_var_flatten)
    loss_train = (loss_train * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
    # l2 regularization?
    if regular == 'l2':
        l2_penalty = lasagne.regularization.regularize_network_params(layer_output, lasagne.regularization.l2)
        loss_train = loss_train + gamma * l2_penalty
    loss_eval = lasagne.objectives.categorical_crossentropy(prediction_eval, target_var_flatten)
    loss_eval = (loss_eval * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
    # compute number of correct labels
    corr_train = lasagne.objectives.categorical_accuracy(prediction_train, target_var_flatten)
    corr_train = (corr_train * mask_var_flatten).sum(dtype=theano.config.floatX)
    corr_eval = lasagne.objectives.categorical_accuracy(prediction_eval, target_var_flatten)
    corr_eval = (corr_eval * mask_var_flatten).sum(dtype=theano.config.floatX)
    # Create update expressions for training.
    # hyper parameters to tune: learning rate, momentum, regularization.
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    decay_rate = args.decay_rate
    momentum = 0.9
    params = lasagne.layers.get_all_params(layer_output, trainable=True)
    updates = utils.create_updates(loss_train, params, update_algo, learning_rate, momentum=momentum)
    # Compile a function performing a training step on a mini-batch
    train_fn = theano.function([input_var, target_var, mask_var], [loss_train, corr_train, num_loss], updates=updates)
    # Compile a second function evaluating the loss and accuracy of network
    eval_fn = theano.function([input_var, target_var, mask_var], [loss_eval, corr_eval, num_loss, final_prediction])
    # Finally, launch the training loop.
    logger.info(
        "Start training: %s with regularization: %s(%f), dropout: %s, fine tune: %s (#training data: %d, batch size: %d, clip: %.1f, peepholes: %s)..." \
        % (
            update_algo, regular, (0.0 if regular == 'none' else gamma), dropout, fine_tune, num_data, batch_size,
            grad_clipping,
            peepholes))
    # NOTE(review): Python 2 integer division here — num_batches is truncated
    # and only used for the time-left estimate.
    num_batches = num_data / batch_size
    num_epochs = 1000
    best_loss = 1e+12
    best_acc = 0.0
    best_epoch_loss = 0
    best_epoch_acc = 0
    best_loss_test_err = 0.
    best_loss_test_corr = 0.
    best_acc_test_err = 0.
    best_acc_test_corr = 0.
    stop_count = 0
    lr = learning_rate
    patience = 5
    for epoch in range(1, num_epochs + 1):
        print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, decay_rate)
        train_err = 0.0
        train_corr = 0.0
        train_total = 0
        start_time = time.time()
        num_back = 0
        train_batches = 0
        for batch in utils.iterate_minibatches(X_train, Y_train, masks=mask_train, batch_size=batch_size, shuffle=True):
            inputs, targets, masks, _ = batch
            err, corr, num = train_fn(inputs, targets, masks)
            train_err += err * num
            train_corr += corr
            train_total += num
            train_batches += 1
            time_ave = (time.time() - start_time) / train_batches
            time_left = (num_batches - train_batches) * time_ave
            # update log
            # Backspaces overwrite the previous in-place progress line.
            sys.stdout.write("\b" * num_back)
            log_info = 'train: %d/%d loss: %.4f, acc: %.2f%%, time left (estimated): %.2fs' % (
                min(train_batches * batch_size, num_data), num_data,
                train_err / train_total, train_corr * 100 / train_total, time_left)
            sys.stdout.write(log_info)
            num_back = len(log_info)
        # update training log after each epoch
        sys.stdout.write("\b" * num_back)
        print 'train: %d/%d loss: %.4f, acc: %.2f%%, time: %.2fs' % (
            min(train_batches * batch_size, num_data), num_data,
            train_err / train_total, train_corr * 100 / train_total, time.time() - start_time)
        # evaluate performance on dev data
        dev_err = 0.0
        dev_corr = 0.0
        dev_total = 0
        for batch in utils.iterate_minibatches(X_dev, Y_dev, masks=mask_dev, batch_size=batch_size):
            inputs, targets, masks, _ = batch
            err, corr, num, predictions = eval_fn(inputs, targets, masks)
            dev_err += err * num
            dev_corr += corr
            dev_total += num
            if output_predict:
                utils.output_predictions(predictions, targets, masks, 'tmp/dev%d' % epoch, label_alphabet)
        print 'dev loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
            dev_err / dev_total, dev_corr, dev_total, dev_corr * 100 / dev_total)
        # Early-stopping bookkeeping: count an epoch as "no progress" only
        # when BOTH dev loss and dev accuracy failed to improve.
        if best_loss < dev_err and best_acc > dev_corr / dev_total:
            stop_count += 1
        else:
            update_loss = False
            update_acc = False
            stop_count = 0
            if best_loss > dev_err:
                update_loss = True
                best_loss = dev_err
                best_epoch_loss = epoch
            if best_acc < dev_corr / dev_total:
                update_acc = True
                best_acc = dev_corr / dev_total
                best_epoch_acc = epoch
            # evaluate on test data when better performance detected
            test_err = 0.0
            test_corr = 0.0
            test_total = 0
            for batch in utils.iterate_minibatches(X_test, Y_test, masks=mask_test, batch_size=batch_size):
                inputs, targets, masks, _ = batch
                err, corr, num, predictions = eval_fn(inputs, targets, masks)
                test_err += err * num
                test_corr += corr
                test_total += num
                if output_predict:
                    utils.output_predictions(predictions, targets, masks, 'tmp/test%d' % epoch, label_alphabet)
            print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
                test_err / test_total, test_corr, test_total, test_corr * 100 / test_total)
            if update_loss:
                best_loss_test_err = test_err
                best_loss_test_corr = test_corr
            if update_acc:
                best_acc_test_err = test_err
                best_acc_test_corr = test_corr
        # stop if dev acc decrease 3 time straightly.
        if stop_count == patience:
            break
        # re-compile a function with new learning rate for training
        lr = learning_rate / (1.0 + epoch * decay_rate)
        updates = utils.create_updates(loss_train, params, update_algo, lr, momentum=momentum)
        train_fn = theano.function([input_var, target_var, mask_var], [loss_train, corr_train, num_loss],
                                   updates=updates)
    # print best performance on test data.
    # NOTE(review): test_total is only defined after at least one improving
    # epoch ran the test evaluation — if the very first epochs never improve,
    # this would raise a NameError; confirm whether that can happen in practice.
    logger.info("final best loss test performance (at epoch %d)" % (best_epoch_loss))
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_loss_test_err / test_total, best_loss_test_corr, test_total, best_loss_test_corr * 100 / test_total)
    logger.info("final best acc test performance (at epoch %d)" % (best_epoch_acc))
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_acc_test_err / test_total, best_acc_test_corr, test_total, best_acc_test_corr * 100 / test_total)
# Script entry point.
if __name__ == '__main__':
    main()
| 13,356
| 47.394928
| 153
|
py
|
LasagneNLP
|
LasagneNLP-master/bi_lstm_cnn_crf.py
|
__author__ = 'max'
import time
import sys
import argparse
from lasagne_nlp.utils import utils
import lasagne_nlp.utils.data_processor as data_processor
from lasagne_nlp.utils.objectives import crf_loss, crf_accuracy
import lasagne
import theano
import theano.tensor as T
from lasagne_nlp.networks.networks import build_BiLSTM_CNN_CRF
import numpy as np
def main():
parser = argparse.ArgumentParser(description='Tuning with bi-directional LSTM-CNN-CRF')
parser.add_argument('--fine_tune', action='store_true', help='Fine tune the word embeddings')
parser.add_argument('--embedding', choices=['word2vec', 'glove', 'senna', 'random'], help='Embedding for words',
required=True)
parser.add_argument('--embedding_dict', default=None, help='path for embedding dict')
parser.add_argument('--batch_size', type=int, default=10, help='Number of sentences in each batch')
parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in LSTM')
parser.add_argument('--num_filters', type=int, default=20, help='Number of filters in CNN')
parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
parser.add_argument('--peepholes', action='store_true', help='Peepholes for LSTM')
parser.add_argument('--oov', choices=['random', 'embedding'], help='Embedding for oov word', required=True)
parser.add_argument('--update', choices=['sgd', 'momentum', 'nesterov', 'adadelta'], help='update algorithm',
default='sgd')
parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training', required=True)
parser.add_argument('--dropout', action='store_true', help='Apply dropout layers')
parser.add_argument('--patience', type=int, default=5, help='Patience for early stopping')
parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
parser.add_argument('--train') # "data/POS-penn/wsj/split1/wsj1.train.original"
parser.add_argument('--dev') # "data/POS-penn/wsj/split1/wsj1.dev.original"
parser.add_argument('--test') # "data/POS-penn/wsj/split1/wsj1.test.original"
args = parser.parse_args()
def construct_input_layer():
if fine_tune:
layer_input = lasagne.layers.InputLayer(shape=(None, max_length), input_var=input_var, name='input')
layer_embedding = lasagne.layers.EmbeddingLayer(layer_input, input_size=alphabet_size,
output_size=embedd_dim,
W=embedd_table, name='embedding')
return layer_embedding
else:
layer_input = lasagne.layers.InputLayer(shape=(None, max_length, embedd_dim), input_var=input_var,
name='input')
return layer_input
def construct_char_input_layer():
layer_char_input = lasagne.layers.InputLayer(shape=(None, max_sent_length, max_char_length),
input_var=char_input_var, name='char-input')
layer_char_input = lasagne.layers.reshape(layer_char_input, (-1, [2]))
layer_char_embedding = lasagne.layers.EmbeddingLayer(layer_char_input, input_size=char_alphabet_size,
output_size=char_embedd_dim, W=char_embedd_table,
name='char_embedding')
layer_char_input = lasagne.layers.DimshuffleLayer(layer_char_embedding, pattern=(0, 2, 1))
return layer_char_input
logger = utils.get_logger("BiLSTM-CNN-CRF")
fine_tune = args.fine_tune
oov = args.oov
regular = args.regular
embedding = args.embedding
embedding_path = args.embedding_dict
train_path = args.train
dev_path = args.dev
test_path = args.test
update_algo = args.update
grad_clipping = args.grad_clipping
peepholes = args.peepholes
num_filters = args.num_filters
gamma = args.gamma
output_predict = args.output_prediction
dropout = args.dropout
X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
embedd_table, label_alphabet, \
C_train, C_dev, C_test, char_embedd_table = data_processor.load_dataset_sequence_labeling(train_path, dev_path,
test_path, oov=oov,
fine_tune=fine_tune,
embedding=embedding,
embedding_path=embedding_path,
use_character=True)
num_labels = label_alphabet.size() - 1
logger.info("constructing network...")
# create variables
target_var = T.imatrix(name='targets')
mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
if fine_tune:
input_var = T.imatrix(name='inputs')
num_data, max_length = X_train.shape
alphabet_size, embedd_dim = embedd_table.shape
else:
input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
num_data, max_length, embedd_dim = X_train.shape
char_input_var = T.itensor3(name='char-inputs')
num_data_char, max_sent_length, max_char_length = C_train.shape
char_alphabet_size, char_embedd_dim = char_embedd_table.shape
assert (max_length == max_sent_length)
assert (num_data == num_data_char)
# construct input and mask layers
layer_incoming1 = construct_char_input_layer()
layer_incoming2 = construct_input_layer()
layer_mask = lasagne.layers.InputLayer(shape=(None, max_length), input_var=mask_var, name='mask')
# construct bi-rnn-cnn
num_units = args.num_units
bi_lstm_cnn_crf = build_BiLSTM_CNN_CRF(layer_incoming1, layer_incoming2, num_units, num_labels, mask=layer_mask,
grad_clipping=grad_clipping, peepholes=peepholes, num_filters=num_filters,
dropout=dropout)
logger.info("Network structure: hidden=%d, filter=%d" % (num_units, num_filters))
# compute loss
num_tokens = mask_var.sum(dtype=theano.config.floatX)
# get outpout of bi-lstm-cnn-crf shape [batch, length, num_labels, num_labels]
energies_train = lasagne.layers.get_output(bi_lstm_cnn_crf)
energies_eval = lasagne.layers.get_output(bi_lstm_cnn_crf, deterministic=True)
loss_train = crf_loss(energies_train, target_var, mask_var).mean()
loss_eval = crf_loss(energies_eval, target_var, mask_var).mean()
# l2 regularization?
if regular == 'l2':
l2_penalty = lasagne.regularization.regularize_network_params(bi_lstm_cnn_crf, lasagne.regularization.l2)
loss_train = loss_train + gamma * l2_penalty
_, corr_train = crf_accuracy(energies_train, target_var)
corr_train = (corr_train * mask_var).sum(dtype=theano.config.floatX)
prediction_eval, corr_eval = crf_accuracy(energies_eval, target_var)
corr_eval = (corr_eval * mask_var).sum(dtype=theano.config.floatX)
# Create update expressions for training.
# hyper parameters to tune: learning rate, momentum, regularization.
batch_size = args.batch_size
learning_rate = 1.0 if update_algo == 'adadelta' else args.learning_rate
decay_rate = args.decay_rate
momentum = 0.9
params = lasagne.layers.get_all_params(bi_lstm_cnn_crf, trainable=True)
updates = utils.create_updates(loss_train, params, update_algo, learning_rate, momentum=momentum)
# Compile a function performing a training step on a mini-batch
train_fn = theano.function([input_var, target_var, mask_var, char_input_var], [loss_train, corr_train, num_tokens],
updates=updates)
# Compile a second function evaluating the loss and accuracy of network
eval_fn = theano.function([input_var, target_var, mask_var, char_input_var],
[loss_eval, corr_eval, num_tokens, prediction_eval])
# Finally, launch the training loop.
logger.info(
"Start training: %s with regularization: %s(%f), dropout: %s, fine tune: %s (#training data: %d, batch size: %d, clip: %.1f, peepholes: %s)..." \
% (
update_algo, regular, (0.0 if regular == 'none' else gamma), dropout, fine_tune, num_data, batch_size,
grad_clipping,
peepholes))
num_batches = num_data / batch_size
num_epochs = 1000
best_loss = 1e+12
best_acc = 0.0
best_epoch_loss = 0
best_epoch_acc = 0
best_loss_test_err = 0.
best_loss_test_corr = 0.
best_acc_test_err = 0.
best_acc_test_corr = 0.
stop_count = 0
lr = learning_rate
patience = args.patience
for epoch in range(1, num_epochs + 1):
print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, decay_rate)
train_err = 0.0
train_corr = 0.0
train_total = 0
train_inst = 0
start_time = time.time()
num_back = 0
train_batches = 0
for batch in utils.iterate_minibatches(X_train, Y_train, masks=mask_train, char_inputs=C_train,
batch_size=batch_size, shuffle=True):
inputs, targets, masks, char_inputs = batch
err, corr, num = train_fn(inputs, targets, masks, char_inputs)
train_err += err * inputs.shape[0]
train_corr += corr
train_total += num
train_inst += inputs.shape[0]
train_batches += 1
time_ave = (time.time() - start_time) / train_batches
time_left = (num_batches - train_batches) * time_ave
# update log
sys.stdout.write("\b" * num_back)
log_info = 'train: %d/%d loss: %.4f, acc: %.2f%%, time left (estimated): %.2fs' % (
min(train_batches * batch_size, num_data), num_data,
train_err / train_inst, train_corr * 100 / train_total, time_left)
sys.stdout.write(log_info)
num_back = len(log_info)
# update training log after each epoch
assert train_inst == num_data
sys.stdout.write("\b" * num_back)
print 'train: %d/%d loss: %.4f, acc: %.2f%%, time: %.2fs' % (
min(train_batches * batch_size, num_data), num_data,
train_err / num_data, train_corr * 100 / train_total, time.time() - start_time)
# evaluate performance on dev data
dev_err = 0.0
dev_corr = 0.0
dev_total = 0
dev_inst = 0
for batch in utils.iterate_minibatches(X_dev, Y_dev, masks=mask_dev, char_inputs=C_dev, batch_size=batch_size):
inputs, targets, masks, char_inputs = batch
err, corr, num, predictions = eval_fn(inputs, targets, masks, char_inputs)
dev_err += err * inputs.shape[0]
dev_corr += corr
dev_total += num
dev_inst += inputs.shape[0]
if output_predict:
utils.output_predictions(predictions, targets, masks, 'tmp/dev%d' % epoch, label_alphabet,
is_flattened=False)
print 'dev loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
dev_err / dev_inst, dev_corr, dev_total, dev_corr * 100 / dev_total)
if best_loss < dev_err and best_acc > dev_corr / dev_total:
stop_count += 1
else:
update_loss = False
update_acc = False
stop_count = 0
if best_loss > dev_err:
update_loss = True
best_loss = dev_err
best_epoch_loss = epoch
if best_acc < dev_corr / dev_total:
update_acc = True
best_acc = dev_corr / dev_total
best_epoch_acc = epoch
# evaluate on test data when better performance detected
test_err = 0.0
test_corr = 0.0
test_total = 0
test_inst = 0
for batch in utils.iterate_minibatches(X_test, Y_test, masks=mask_test, char_inputs=C_test,
batch_size=batch_size):
inputs, targets, masks, char_inputs = batch
err, corr, num, predictions = eval_fn(inputs, targets, masks, char_inputs)
test_err += err * inputs.shape[0]
test_corr += corr
test_total += num
test_inst += inputs.shape[0]
if output_predict:
utils.output_predictions(predictions, targets, masks, 'tmp/test%d' % epoch, label_alphabet,
is_flattened=False)
print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
test_err / test_inst, test_corr, test_total, test_corr * 100 / test_total)
if update_loss:
best_loss_test_err = test_err
best_loss_test_corr = test_corr
if update_acc:
best_acc_test_err = test_err
best_acc_test_corr = test_corr
# stop if dev acc decrease 3 time straightly.
if stop_count == patience:
break
# re-compile a function with new learning rate for training
if update_algo != 'adadelta':
lr = learning_rate / (1.0 + epoch * decay_rate)
updates = utils.create_updates(loss_train, params, update_algo, lr, momentum=momentum)
train_fn = theano.function([input_var, target_var, mask_var, char_input_var],
[loss_train, corr_train, num_tokens],
updates=updates)
# print best performance on test data.
logger.info("final best loss test performance (at epoch %d)" % best_epoch_loss)
print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
best_loss_test_err / test_inst, best_loss_test_corr, test_total, best_loss_test_corr * 100 / test_total)
logger.info("final best acc test performance (at epoch %d)" % best_epoch_acc)
print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
best_acc_test_err / test_inst, best_acc_test_corr, test_total, best_acc_test_corr * 100 / test_total)
def test():
energies_var = T.tensor4('energies', dtype=theano.config.floatX)
targets_var = T.imatrix('targets')
masks_var = T.matrix('masks', dtype=theano.config.floatX)
layer_input = lasagne.layers.InputLayer([2, 2, 3, 3], input_var=energies_var)
out = lasagne.layers.get_output(layer_input)
loss = crf_loss(out, targets_var, masks_var)
prediction, acc = crf_accuracy(energies_var, targets_var)
fn = theano.function([energies_var, targets_var, masks_var], [loss, prediction, acc])
energies = np.array([[[[10, 15, 20], [5, 10, 15], [3, 2, 0]], [[5, 10, 1], [5, 10, 1], [5, 10, 1]]],
[[[5, 6, 7], [2, 3, 4], [2, 1, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]], dtype=np.float32)
targets = np.array([[0, 1], [0, 2]], dtype=np.int32)
masks = np.array([[1, 1], [1, 0]], dtype=np.float32)
l, p, a = fn(energies, targets, masks)
print l
print p
print a
# Script entry point: run the CLI-driven training procedure.
if __name__ == '__main__':
    main()
| 15,909
| 48.256966
| 153
|
py
|
LasagneNLP
|
LasagneNLP-master/bi_lstm_highcnn.py
|
# Module author metadata.
__author__ = 'max'
import time
import sys
import argparse
from lasagne_nlp.utils import utils
import lasagne_nlp.utils.data_processor as data_processor
import theano.tensor as T
import theano
import lasagne
from lasagne_nlp.networks.networks import build_BiLSTM_HighCNN
import lasagne.nonlinearities as nonlinearities
def main():
    """Train and evaluate a bi-directional LSTM with highway-CNN character
    features for sequence labeling.

    All configuration comes from the command line.  The function loads the
    train/dev/test sets, builds the network, trains with early stopping on
    dev performance, and reports test results measured at the epochs with the
    best dev loss and the best dev accuracy.
    """
    parser = argparse.ArgumentParser(description='Tuning with bi-directional LSTM-HighCNN')
    parser.add_argument('--fine_tune', action='store_true', help='Fine tune the word embeddings')
    parser.add_argument('--embedding', choices=['word2vec', 'glove', 'senna'], help='Embedding for words',
                        required=True)
    parser.add_argument('--embedding_dict', default='data/word2vec/GoogleNews-vectors-negative300.bin',
                        help='path for embedding dict')
    parser.add_argument('--batch_size', type=int, default=10, help='Number of sentences in each batch')
    parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in LSTM')
    parser.add_argument('--num_filters', type=int, default=20, help='Number of filters in CNN')
    parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
    parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
    parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
    parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
    parser.add_argument('--peepholes', action='store_true', help='Peepholes for LSTM')
    parser.add_argument('--oov', choices=['random', 'embedding'], help='Embedding for oov word', required=True)
    parser.add_argument('--update', choices=['sgd', 'momentum', 'nesterov', 'adadelta'], help='update algorithm', default='sgd')
    parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training', required=True)
    parser.add_argument('--dropout', action='store_true', help='Apply dropout layers')
    parser.add_argument('--patience', type=int, default=5, help='Patience for early stopping')
    parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
    parser.add_argument('--train')  # "data/POS-penn/wsj/split1/wsj1.train.original"
    parser.add_argument('--dev')  # "data/POS-penn/wsj/split1/wsj1.dev.original"
    parser.add_argument('--test')  # "data/POS-penn/wsj/split1/wsj1.test.original"
    args = parser.parse_args()

    def construct_input_layer():
        # Word-level input: integer indices through a trainable embedding
        # table in fine-tune mode, otherwise precomputed embedding vectors.
        if fine_tune:
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length), input_var=input_var, name='input')
            layer_embedding = lasagne.layers.EmbeddingLayer(layer_input, input_size=alphabet_size,
                                                            output_size=embedd_dim,
                                                            W=embedd_table, name='embedding')
            return layer_embedding
        else:
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length, embedd_dim), input_var=input_var,
                                                    name='input')
            return layer_input

    def construct_char_input_layer():
        # Character-level input: embed characters, then move the embedding
        # dimension ahead of the character axis for the convolution.
        layer_char_input = lasagne.layers.InputLayer(shape=(None, max_sent_length, max_char_length),
                                                     input_var=char_input_var, name='char-input')
        layer_char_input = lasagne.layers.reshape(layer_char_input, (-1, [2]))
        layer_char_embedding = lasagne.layers.EmbeddingLayer(layer_char_input, input_size=char_alphabet_size,
                                                             output_size=char_embedd_dim, W=char_embedd_table,
                                                             name='char_embedding')
        layer_char_input = lasagne.layers.DimshuffleLayer(layer_char_embedding, pattern=(0, 2, 1))
        return layer_char_input

    logger = utils.get_logger("BiLSTM-HighCNN")
    fine_tune = args.fine_tune
    oov = args.oov
    regular = args.regular
    embedding = args.embedding
    embedding_path = args.embedding_dict
    train_path = args.train
    dev_path = args.dev
    test_path = args.test
    update_algo = args.update
    grad_clipping = args.grad_clipping
    peepholes = args.peepholes
    num_filters = args.num_filters
    gamma = args.gamma
    output_predict = args.output_prediction
    dropout = args.dropout
    X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
    embedd_table, label_alphabet, \
    C_train, C_dev, C_test, char_embedd_table = data_processor.load_dataset_sequence_labeling(train_path, dev_path,
                                                                                              test_path, oov=oov,
                                                                                              fine_tune=fine_tune,
                                                                                              embedding=embedding,
                                                                                              embedding_path=embedding_path,
                                                                                              use_character=True)
    # NOTE(review): presumably one alphabet entry is a padding symbol, hence
    # the -1 when sizing the softmax output — confirm against label_alphabet.
    num_labels = label_alphabet.size() - 1
    logger.info("constructing network...")
    # create variables
    target_var = T.imatrix(name='targets')
    mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
    if fine_tune:
        input_var = T.imatrix(name='inputs')
        num_data, max_length = X_train.shape
        alphabet_size, embedd_dim = embedd_table.shape
    else:
        input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
        num_data, max_length, embedd_dim = X_train.shape
    char_input_var = T.itensor3(name='char-inputs')
    num_data_char, max_sent_length, max_char_length = C_train.shape
    char_alphabet_size, char_embedd_dim = char_embedd_table.shape
    assert (max_length == max_sent_length)
    assert (num_data == num_data_char)
    # construct input and mask layers
    layer_incoming1 = construct_char_input_layer()
    layer_incoming2 = construct_input_layer()
    layer_mask = lasagne.layers.InputLayer(shape=(None, max_length), input_var=mask_var, name='mask')
    # construct bi-rnn-cnn
    num_units = args.num_units
    bi_lstm_cnn = build_BiLSTM_HighCNN(layer_incoming1, layer_incoming2, num_units, mask=layer_mask,
                                       grad_clipping=grad_clipping, peepholes=peepholes, num_filters=num_filters,
                                       dropout=dropout)
    # reshape bi-rnn-cnn to [batch * max_length, num_units]
    bi_lstm_cnn = lasagne.layers.reshape(bi_lstm_cnn, (-1, [2]))
    # construct output layer (dense layer with softmax)
    layer_output = lasagne.layers.DenseLayer(bi_lstm_cnn, num_units=num_labels, nonlinearity=nonlinearities.softmax,
                                             name='softmax')
    # get output of bi-lstm-cnn shape=[batch * max_length, #label]
    prediction_train = lasagne.layers.get_output(layer_output)
    prediction_eval = lasagne.layers.get_output(layer_output, deterministic=True)
    final_prediction = T.argmax(prediction_eval, axis=1)
    # flat target_var to vector
    target_var_flatten = target_var.flatten()
    # flat mask_var to vector
    mask_var_flatten = mask_var.flatten()
    # compute loss; num_loss is the number of unmasked (real) tokens
    num_loss = mask_var_flatten.sum(dtype=theano.config.floatX)
    # for training, we use mean of loss over number of labels
    loss_train = lasagne.objectives.categorical_crossentropy(prediction_train, target_var_flatten)
    loss_train = (loss_train * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
    # l2 regularization?
    if regular == 'l2':
        l2_penalty = lasagne.regularization.regularize_network_params(layer_output, lasagne.regularization.l2)
        loss_train = loss_train + gamma * l2_penalty
    loss_eval = lasagne.objectives.categorical_crossentropy(prediction_eval, target_var_flatten)
    loss_eval = (loss_eval * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
    # compute number of correct labels (masked positions do not count)
    corr_train = lasagne.objectives.categorical_accuracy(prediction_train, target_var_flatten)
    corr_train = (corr_train * mask_var_flatten).sum(dtype=theano.config.floatX)
    corr_eval = lasagne.objectives.categorical_accuracy(prediction_eval, target_var_flatten)
    corr_eval = (corr_eval * mask_var_flatten).sum(dtype=theano.config.floatX)
    # Create update expressions for training.
    # hyper parameters to tune: learning rate, momentum, regularization.
    batch_size = args.batch_size
    # adadelta manages its own effective step size, so lr is pinned to 1.0
    learning_rate = 1.0 if update_algo == 'adadelta' else args.learning_rate
    decay_rate = args.decay_rate
    momentum = 0.9
    params = lasagne.layers.get_all_params(layer_output, trainable=True)
    updates = utils.create_updates(loss_train, params, update_algo, learning_rate, momentum=momentum)
    # Compile a function performing a training step on a mini-batch
    train_fn = theano.function([input_var, target_var, mask_var, char_input_var], [loss_train, corr_train, num_loss],
                               updates=updates)
    # Compile a second function evaluating the loss and accuracy of network
    eval_fn = theano.function([input_var, target_var, mask_var, char_input_var],
                              [loss_eval, corr_eval, num_loss, final_prediction])
    # Finally, launch the training loop.
    logger.info(
        "Start training: %s with regularization: %s(%f), dropout: %s, fine tune: %s (#training data: %d, batch size: %d, clip: %.1f, peepholes: %s)..." \
        % (
            update_algo, regular, (0.0 if regular == 'none' else gamma), dropout, fine_tune, num_data, batch_size, grad_clipping,
            peepholes))
    # NOTE(review): Python 2 integer division — num_batches floors, so a
    # partial final batch is excluded from the time-left estimate.
    num_batches = num_data / batch_size
    num_epochs = 1000
    best_loss = 1e+12
    best_acc = 0.0
    best_epoch_loss = 0
    best_epoch_acc = 0
    best_loss_test_err = 0.
    best_loss_test_corr = 0.
    best_acc_test_err = 0.
    best_acc_test_corr = 0.
    stop_count = 0
    lr = learning_rate
    patience = args.patience
    for epoch in range(1, num_epochs + 1):
        print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, decay_rate)
        train_err = 0.0
        train_corr = 0.0
        train_total = 0
        start_time = time.time()
        num_back = 0
        train_batches = 0
        for batch in utils.iterate_minibatches(X_train, Y_train, masks=mask_train, char_inputs=C_train,
                                               batch_size=batch_size, shuffle=True):
            inputs, targets, masks, char_inputs = batch
            err, corr, num = train_fn(inputs, targets, masks, char_inputs)
            # err is a per-token mean; weight by token count to aggregate
            train_err += err * num
            train_corr += corr
            train_total += num
            train_batches += 1
            time_ave = (time.time() - start_time) / train_batches
            time_left = (num_batches - train_batches) * time_ave
            # update log in place using backspaces over the previous line
            sys.stdout.write("\b" * num_back)
            log_info = 'train: %d/%d loss: %.4f, acc: %.2f%%, time left (estimated): %.2fs' % (
                min(train_batches * batch_size, num_data), num_data,
                train_err / train_total, train_corr * 100 / train_total, time_left)
            sys.stdout.write(log_info)
            num_back = len(log_info)
        # update training log after each epoch
        sys.stdout.write("\b" * num_back)
        print 'train: %d/%d loss: %.4f, acc: %.2f%%, time: %.2fs' % (
            min(train_batches * batch_size, num_data), num_data,
            train_err / train_total, train_corr * 100 / train_total, time.time() - start_time)
        # evaluate performance on dev data
        dev_err = 0.0
        dev_corr = 0.0
        dev_total = 0
        for batch in utils.iterate_minibatches(X_dev, Y_dev, masks=mask_dev, char_inputs=C_dev, batch_size=batch_size):
            inputs, targets, masks, char_inputs = batch
            err, corr, num, predictions = eval_fn(inputs, targets, masks, char_inputs)
            dev_err += err * num
            dev_corr += corr
            dev_total += num
            if output_predict:
                utils.output_predictions(predictions, targets, masks, 'tmp/dev%d' % epoch, label_alphabet)
        print 'dev loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
            dev_err / dev_total, dev_corr, dev_total, dev_corr * 100 / dev_total)
        # Early stopping: an epoch counts against patience only when BOTH
        # dev loss and dev accuracy are worse than the best seen so far.
        if best_loss < dev_err and best_acc > dev_corr / dev_total:
            stop_count += 1
        else:
            update_loss = False
            update_acc = False
            stop_count = 0
            if best_loss > dev_err:
                update_loss = True
                best_loss = dev_err
                best_epoch_loss = epoch
            if best_acc < dev_corr / dev_total:
                update_acc = True
                best_acc = dev_corr / dev_total
                best_epoch_acc = epoch
            # evaluate on test data when better performance detected
            test_err = 0.0
            test_corr = 0.0
            test_total = 0
            for batch in utils.iterate_minibatches(X_test, Y_test, masks=mask_test, char_inputs=C_test,
                                                   batch_size=batch_size):
                inputs, targets, masks, char_inputs = batch
                err, corr, num, predictions = eval_fn(inputs, targets, masks, char_inputs)
                test_err += err * num
                test_corr += corr
                test_total += num
                if output_predict:
                    utils.output_predictions(predictions, targets, masks, 'tmp/test%d' % epoch, label_alphabet)
            print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
                test_err / test_total, test_corr, test_total, test_corr * 100 / test_total)
            if update_loss:
                best_loss_test_err = test_err
                best_loss_test_corr = test_corr
            if update_acc:
                best_acc_test_err = test_err
                best_acc_test_corr = test_corr
        # stop if dev acc decrease 3 time straightly.
        if stop_count == patience:
            break
        # re-compile a function with new learning rate for training
        if update_algo != 'adadelta':
            lr = learning_rate / (1.0 + epoch * decay_rate)
            updates = utils.create_updates(loss_train, params, update_algo, lr, momentum=momentum)
            train_fn = theano.function([input_var, target_var, mask_var, char_input_var],
                                       [loss_train, corr_train, num_loss],
                                       updates=updates)
    # print best performance on test data.
    # NOTE(review): test_total here is left over from the most recent test
    # evaluation; epoch 1 always triggers one, so it should be set — verify.
    logger.info("final best loss test performance (at epoch %d)" % best_epoch_loss)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_loss_test_err / test_total, best_loss_test_corr, test_total, best_loss_test_corr * 100 / test_total)
    logger.info("final best acc test performance (at epoch %d)" % best_epoch_acc)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_acc_test_err / test_total, best_acc_test_corr, test_total, best_acc_test_corr * 100 / test_total)
# Script entry point: run the CLI-driven training procedure.
if __name__ == '__main__':
    main()
| 15,407
| 50.019868
| 153
|
py
|
LasagneNLP
|
LasagneNLP-master/bi_lstm_cnn.py
|
# Module author metadata.
__author__ = 'max'
import time
import sys
import argparse
from lasagne_nlp.utils import utils
import lasagne_nlp.utils.data_processor as data_processor
import theano.tensor as T
import theano
import lasagne
from lasagne_nlp.networks.networks import build_BiLSTM_CNN
import lasagne.nonlinearities as nonlinearities
def main():
    """Train and evaluate a bi-directional LSTM with CNN character features
    for sequence labeling.

    All configuration comes from the command line.  The function loads the
    train/dev/test sets, builds the network, trains with early stopping on
    dev performance, and reports test results measured at the epochs with the
    best dev loss and the best dev accuracy.
    """
    parser = argparse.ArgumentParser(description='Tuning with bi-directional LSTM-CNN')
    parser.add_argument('--fine_tune', action='store_true', help='Fine tune the word embeddings')
    parser.add_argument('--embedding', choices=['word2vec', 'glove', 'senna'], help='Embedding for words',
                        required=True)
    parser.add_argument('--embedding_dict', default='data/word2vec/GoogleNews-vectors-negative300.bin',
                        help='path for embedding dict')
    parser.add_argument('--batch_size', type=int, default=10, help='Number of sentences in each batch')
    parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in LSTM')
    parser.add_argument('--num_filters', type=int, default=20, help='Number of filters in CNN')
    parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
    parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
    parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
    parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
    parser.add_argument('--peepholes', action='store_true', help='Peepholes for LSTM')
    parser.add_argument('--oov', choices=['random', 'embedding'], help='Embedding for oov word', required=True)
    parser.add_argument('--update', choices=['sgd', 'momentum', 'nesterov', 'adadelta'], help='update algorithm', default='sgd')
    parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training', required=True)
    parser.add_argument('--dropout', action='store_true', help='Apply dropout layers')
    parser.add_argument('--patience', type=int, default=5, help='Patience for early stopping')
    parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
    parser.add_argument('--train')  # "data/POS-penn/wsj/split1/wsj1.train.original"
    parser.add_argument('--dev')  # "data/POS-penn/wsj/split1/wsj1.dev.original"
    parser.add_argument('--test')  # "data/POS-penn/wsj/split1/wsj1.test.original"
    args = parser.parse_args()

    def construct_input_layer():
        # Word-level input: integer indices through a trainable embedding
        # table in fine-tune mode, otherwise precomputed embedding vectors.
        if fine_tune:
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length), input_var=input_var, name='input')
            layer_embedding = lasagne.layers.EmbeddingLayer(layer_input, input_size=alphabet_size,
                                                            output_size=embedd_dim,
                                                            W=embedd_table, name='embedding')
            return layer_embedding
        else:
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length, embedd_dim), input_var=input_var,
                                                    name='input')
            return layer_input

    def construct_char_input_layer():
        # Character-level input: embed characters, then move the embedding
        # dimension ahead of the character axis for the convolution.
        layer_char_input = lasagne.layers.InputLayer(shape=(None, max_sent_length, max_char_length),
                                                     input_var=char_input_var, name='char-input')
        layer_char_input = lasagne.layers.reshape(layer_char_input, (-1, [2]))
        layer_char_embedding = lasagne.layers.EmbeddingLayer(layer_char_input, input_size=char_alphabet_size,
                                                             output_size=char_embedd_dim, W=char_embedd_table,
                                                             name='char_embedding')
        layer_char_input = lasagne.layers.DimshuffleLayer(layer_char_embedding, pattern=(0, 2, 1))
        return layer_char_input

    logger = utils.get_logger("BiLSTM-CNN")
    fine_tune = args.fine_tune
    oov = args.oov
    regular = args.regular
    embedding = args.embedding
    embedding_path = args.embedding_dict
    train_path = args.train
    dev_path = args.dev
    test_path = args.test
    update_algo = args.update
    grad_clipping = args.grad_clipping
    peepholes = args.peepholes
    num_filters = args.num_filters
    gamma = args.gamma
    output_predict = args.output_prediction
    dropout = args.dropout
    X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
    embedd_table, label_alphabet, \
    C_train, C_dev, C_test, char_embedd_table = data_processor.load_dataset_sequence_labeling(train_path, dev_path,
                                                                                              test_path, oov=oov,
                                                                                              fine_tune=fine_tune,
                                                                                              embedding=embedding,
                                                                                              embedding_path=embedding_path,
                                                                                              use_character=True)
    # NOTE(review): presumably one alphabet entry is a padding symbol, hence
    # the -1 when sizing the softmax output — confirm against label_alphabet.
    num_labels = label_alphabet.size() - 1
    logger.info("constructing network...")
    # create variables
    target_var = T.imatrix(name='targets')
    mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
    if fine_tune:
        input_var = T.imatrix(name='inputs')
        num_data, max_length = X_train.shape
        alphabet_size, embedd_dim = embedd_table.shape
    else:
        input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
        num_data, max_length, embedd_dim = X_train.shape
    char_input_var = T.itensor3(name='char-inputs')
    num_data_char, max_sent_length, max_char_length = C_train.shape
    char_alphabet_size, char_embedd_dim = char_embedd_table.shape
    assert (max_length == max_sent_length)
    assert (num_data == num_data_char)
    # construct input and mask layers
    layer_incoming1 = construct_char_input_layer()
    layer_incoming2 = construct_input_layer()
    layer_mask = lasagne.layers.InputLayer(shape=(None, max_length), input_var=mask_var, name='mask')
    # construct bi-rnn-cnn
    num_units = args.num_units
    bi_lstm_cnn = build_BiLSTM_CNN(layer_incoming1, layer_incoming2, num_units, mask=layer_mask,
                                   grad_clipping=grad_clipping, peepholes=peepholes, num_filters=num_filters,
                                   dropout=dropout)
    # reshape bi-rnn-cnn to [batch * max_length, num_units]
    bi_lstm_cnn = lasagne.layers.reshape(bi_lstm_cnn, (-1, [2]))
    # construct output layer (dense layer with softmax)
    layer_output = lasagne.layers.DenseLayer(bi_lstm_cnn, num_units=num_labels, nonlinearity=nonlinearities.softmax,
                                             name='softmax')
    # get output of bi-lstm-cnn shape=[batch * max_length, #label]
    prediction_train = lasagne.layers.get_output(layer_output)
    prediction_eval = lasagne.layers.get_output(layer_output, deterministic=True)
    final_prediction = T.argmax(prediction_eval, axis=1)
    # flat target_var to vector
    target_var_flatten = target_var.flatten()
    # flat mask_var to vector
    mask_var_flatten = mask_var.flatten()
    # compute loss; num_loss is the number of unmasked (real) tokens
    num_loss = mask_var_flatten.sum(dtype=theano.config.floatX)
    # for training, we use mean of loss over number of labels
    loss_train = lasagne.objectives.categorical_crossentropy(prediction_train, target_var_flatten)
    loss_train = (loss_train * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
    # l2 regularization?
    if regular == 'l2':
        l2_penalty = lasagne.regularization.regularize_network_params(layer_output, lasagne.regularization.l2)
        loss_train = loss_train + gamma * l2_penalty
    loss_eval = lasagne.objectives.categorical_crossentropy(prediction_eval, target_var_flatten)
    loss_eval = (loss_eval * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
    # compute number of correct labels (masked positions do not count)
    corr_train = lasagne.objectives.categorical_accuracy(prediction_train, target_var_flatten)
    corr_train = (corr_train * mask_var_flatten).sum(dtype=theano.config.floatX)
    corr_eval = lasagne.objectives.categorical_accuracy(prediction_eval, target_var_flatten)
    corr_eval = (corr_eval * mask_var_flatten).sum(dtype=theano.config.floatX)
    # Create update expressions for training.
    # hyper parameters to tune: learning rate, momentum, regularization.
    batch_size = args.batch_size
    # adadelta manages its own effective step size, so lr is pinned to 1.0
    learning_rate = 1.0 if update_algo == 'adadelta' else args.learning_rate
    decay_rate = args.decay_rate
    momentum = 0.9
    params = lasagne.layers.get_all_params(layer_output, trainable=True)
    updates = utils.create_updates(loss_train, params, update_algo, learning_rate, momentum=momentum)
    # Compile a function performing a training step on a mini-batch
    train_fn = theano.function([input_var, target_var, mask_var, char_input_var], [loss_train, corr_train, num_loss],
                               updates=updates)
    # Compile a second function evaluating the loss and accuracy of network
    eval_fn = theano.function([input_var, target_var, mask_var, char_input_var],
                              [loss_eval, corr_eval, num_loss, final_prediction])
    # Finally, launch the training loop.
    logger.info(
        "Start training: %s with regularization: %s(%f), dropout: %s, fine tune: %s (#training data: %d, batch size: %d, clip: %.1f, peepholes: %s)..." \
        % (
            update_algo, regular, (0.0 if regular == 'none' else gamma), dropout, fine_tune, num_data, batch_size, grad_clipping,
            peepholes))
    # NOTE(review): Python 2 integer division — num_batches floors, so a
    # partial final batch is excluded from the time-left estimate.
    num_batches = num_data / batch_size
    num_epochs = 1000
    best_loss = 1e+12
    best_acc = 0.0
    best_epoch_loss = 0
    best_epoch_acc = 0
    best_loss_test_err = 0.
    best_loss_test_corr = 0.
    best_acc_test_err = 0.
    best_acc_test_corr = 0.
    stop_count = 0
    lr = learning_rate
    patience = args.patience
    for epoch in range(1, num_epochs + 1):
        print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, decay_rate)
        train_err = 0.0
        train_corr = 0.0
        train_total = 0
        start_time = time.time()
        num_back = 0
        train_batches = 0
        for batch in utils.iterate_minibatches(X_train, Y_train, masks=mask_train, char_inputs=C_train,
                                               batch_size=batch_size, shuffle=True):
            inputs, targets, masks, char_inputs = batch
            err, corr, num = train_fn(inputs, targets, masks, char_inputs)
            # err is a per-token mean; weight by token count to aggregate
            train_err += err * num
            train_corr += corr
            train_total += num
            train_batches += 1
            time_ave = (time.time() - start_time) / train_batches
            time_left = (num_batches - train_batches) * time_ave
            # update log in place using backspaces over the previous line
            sys.stdout.write("\b" * num_back)
            log_info = 'train: %d/%d loss: %.4f, acc: %.2f%%, time left (estimated): %.2fs' % (
                min(train_batches * batch_size, num_data), num_data,
                train_err / train_total, train_corr * 100 / train_total, time_left)
            sys.stdout.write(log_info)
            num_back = len(log_info)
        # update training log after each epoch
        sys.stdout.write("\b" * num_back)
        print 'train: %d/%d loss: %.4f, acc: %.2f%%, time: %.2fs' % (
            min(train_batches * batch_size, num_data), num_data,
            train_err / train_total, train_corr * 100 / train_total, time.time() - start_time)
        # evaluate performance on dev data
        dev_err = 0.0
        dev_corr = 0.0
        dev_total = 0
        for batch in utils.iterate_minibatches(X_dev, Y_dev, masks=mask_dev, char_inputs=C_dev, batch_size=batch_size):
            inputs, targets, masks, char_inputs = batch
            err, corr, num, predictions = eval_fn(inputs, targets, masks, char_inputs)
            dev_err += err * num
            dev_corr += corr
            dev_total += num
            if output_predict:
                utils.output_predictions(predictions, targets, masks, 'tmp/dev%d' % epoch, label_alphabet)
        print 'dev loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
            dev_err / dev_total, dev_corr, dev_total, dev_corr * 100 / dev_total)
        # Early stopping: an epoch counts against patience only when BOTH
        # dev loss and dev accuracy are worse than the best seen so far.
        if best_loss < dev_err and best_acc > dev_corr / dev_total:
            stop_count += 1
        else:
            update_loss = False
            update_acc = False
            stop_count = 0
            if best_loss > dev_err:
                update_loss = True
                best_loss = dev_err
                best_epoch_loss = epoch
            if best_acc < dev_corr / dev_total:
                update_acc = True
                best_acc = dev_corr / dev_total
                best_epoch_acc = epoch
            # evaluate on test data when better performance detected
            test_err = 0.0
            test_corr = 0.0
            test_total = 0
            for batch in utils.iterate_minibatches(X_test, Y_test, masks=mask_test, char_inputs=C_test,
                                                   batch_size=batch_size):
                inputs, targets, masks, char_inputs = batch
                err, corr, num, predictions = eval_fn(inputs, targets, masks, char_inputs)
                test_err += err * num
                test_corr += corr
                test_total += num
                if output_predict:
                    utils.output_predictions(predictions, targets, masks, 'tmp/test%d' % epoch, label_alphabet)
            print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
                test_err / test_total, test_corr, test_total, test_corr * 100 / test_total)
            if update_loss:
                best_loss_test_err = test_err
                best_loss_test_corr = test_corr
            if update_acc:
                best_acc_test_err = test_err
                best_acc_test_corr = test_corr
        # stop if dev acc decrease 3 time straightly.
        if stop_count == patience:
            break
        # re-compile a function with new learning rate for training
        if update_algo != 'adadelta':
            lr = learning_rate / (1.0 + epoch * decay_rate)
            updates = utils.create_updates(loss_train, params, update_algo, lr, momentum=momentum)
            train_fn = theano.function([input_var, target_var, mask_var, char_input_var],
                                       [loss_train, corr_train, num_loss],
                                       updates=updates)
    # print best performance on test data.
    # NOTE(review): test_total here is left over from the most recent test
    # evaluation; epoch 1 always triggers one, so it should be set — verify.
    logger.info("final best loss test performance (at epoch %d)" % best_epoch_loss)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_loss_test_err / test_total, best_loss_test_corr, test_total, best_loss_test_corr * 100 / test_total)
    logger.info("final best acc test performance (at epoch %d)" % best_epoch_acc)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_acc_test_err / test_total, best_acc_test_corr, test_total, best_acc_test_corr * 100 / test_total)
# Script entry point: run the CLI-driven training procedure.
if __name__ == '__main__':
    main()
| 15,385
| 49.94702
| 153
|
py
|
LasagneNLP
|
LasagneNLP-master/bi_rnn.py
|
# Module author metadata.
__author__ = 'max'
import time
import sys
import argparse
from lasagne_nlp.utils import utils
from lasagne_nlp.utils.regularization import dima
import lasagne_nlp.utils.data_processor as data_processor
import theano.tensor as T
import theano
import lasagne
from lasagne_nlp.networks.networks import build_BiRNN
import lasagne.nonlinearities as nonlinearities
def main():
    """Train and evaluate a bi-directional RNN sequence labeler (Theano/Lasagne).

    Parses command-line options, loads train/dev/test data, builds a
    BiRNN + softmax network, then trains with early stopping on dev
    accuracy, reporting test performance at the best-loss and best-accuracy
    epochs.  Python 2 code (print statements).
    """
    # ---- command-line interface ----
    parser = argparse.ArgumentParser(description='Tuning with bi-directional RNN')
    parser.add_argument('--fine_tune', action='store_true', help='Fine tune the word embeddings')
    parser.add_argument('--embedding', choices=['word2vec', 'glove', 'senna'], help='Embedding for words',
                        required=True)
    parser.add_argument('--embedding_dict', default='data/word2vec/GoogleNews-vectors-negative300.bin',
                        help='path for embedding dict')
    parser.add_argument('--batch_size', type=int, default=10, help='Number of sentences in each batch')
    parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in RNN')
    parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
    parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
    parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
    parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
    parser.add_argument('--oov', choices=['random', 'embedding'], help='Embedding for oov word', required=True)
    parser.add_argument('--update', choices=['sgd', 'momentum', 'nesterov'], help='update algorithm', default='sgd')
    parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training',
                        required=True)
    parser.add_argument('--dropout', action='store_true', help='Apply dropout layers')
    parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
    parser.add_argument('--train')  # "data/POS-penn/wsj/split1/wsj1.train.original"
    parser.add_argument('--dev')  # "data/POS-penn/wsj/split1/wsj1.dev.original"
    parser.add_argument('--test')  # "data/POS-penn/wsj/split1/wsj1.test.original"
    args = parser.parse_args()
    def construct_input_layer():
        # Builds the input side of the network.  Reads fine_tune, max_length,
        # input_var, alphabet_size, embedd_dim and embedd_table from the
        # enclosing scope (all are bound before this closure is called).
        if fine_tune:
            # Inputs are word indices; embeddings are learned (initialised from embedd_table).
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length), input_var=input_var, name='input')
            layer_embedding = lasagne.layers.EmbeddingLayer(layer_input, input_size=alphabet_size,
                                                            output_size=embedd_dim, W=embedd_table, name='embedding')
            return layer_embedding
        else:
            # Inputs are pre-computed (frozen) embedding vectors.
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length, embedd_dim), input_var=input_var,
                                                    name='input')
            return layer_input
    logger = utils.get_logger("BiRNN")
    # ---- unpack options ----
    fine_tune = args.fine_tune
    oov = args.oov
    regular = args.regular
    embedding = args.embedding
    embedding_path = args.embedding_dict
    train_path = args.train
    dev_path = args.dev
    test_path = args.test
    update_algo = args.update
    grad_clipping = args.grad_clipping
    gamma = args.gamma
    output_predict = args.output_prediction
    dropout = args.dropout
    # ---- load data ----
    X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
    embedd_table, label_alphabet, _, _, _, _ = data_processor.load_dataset_sequence_labeling(train_path, dev_path,
                                                                                             test_path, oov=oov,
                                                                                             fine_tune=fine_tune,
                                                                                             embedding=embedding,
                                                                                             embedding_path=embedding_path)
    # alphabet reserves one entry (presumably padding) that is not a real label
    num_labels = label_alphabet.size() - 1
    logger.info("constructing network...")
    # create variables
    target_var = T.imatrix(name='targets')
    mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
    if fine_tune:
        # integer word indices; shapes come from the index matrix / embedding table
        input_var = T.imatrix(name='inputs')
        num_data, max_length = X_train.shape
        alphabet_size, embedd_dim = embedd_table.shape
    else:
        # dense embedding inputs
        input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
        num_data, max_length, embedd_dim = X_train.shape
    # construct input and mask layers
    layer_incoming = construct_input_layer()
    layer_mask = lasagne.layers.InputLayer(shape=(None, max_length), input_var=mask_var, name='mask')
    # construct bi-rnn
    num_units = args.num_units
    bi_rnn = build_BiRNN(layer_incoming, num_units, mask=layer_mask, grad_clipping=grad_clipping,
                         dropout=dropout)
    # reshape bi-rnn to [batch * max_length, num_units]
    bi_rnn = lasagne.layers.reshape(bi_rnn, (-1, [2]))
    # construct output layer (dense layer with softmax)
    layer_output = lasagne.layers.DenseLayer(bi_rnn, num_units=num_labels, nonlinearity=nonlinearities.softmax,
                                             name='softmax')
    # get output of bi-rnn shape=[batch * max_length, #label]
    # train uses stochastic (dropout) output; eval uses the deterministic pass
    prediction_train = lasagne.layers.get_output(layer_output)
    prediction_eval = lasagne.layers.get_output(layer_output, deterministic=True)
    final_prediction = T.argmax(prediction_eval, axis=1)
    # flat target_var to vector
    target_var_flatten = target_var.flatten()
    # flat mask_var to vector
    mask_var_flatten = mask_var.flatten()
    # compute loss: num_loss counts the non-padding positions
    num_loss = mask_var_flatten.sum(dtype=theano.config.floatX)
    # for training, we use mean of loss over number of labels
    loss_train = lasagne.objectives.categorical_crossentropy(prediction_train, target_var_flatten)
    loss_train = (loss_train * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
    ############################################
    # l2 regularization?
    if regular == 'l2':
        l2_penalty = lasagne.regularization.regularize_network_params(layer_output, lasagne.regularization.l2)
        loss_train = loss_train + gamma * l2_penalty
    # dima regularization?
    # if regular == 'dima':
    #     params_regular = utils.get_all_params_by_name(layer_output, name=['forward.hidden_to_hidden.W',
    #                                                                       'backward.hidden_to_hidden.W'])
    #     dima_penalty = lasagne.regularization.apply_penalty(params_regular, dima)
    #     loss_train = loss_train + gamma * dima_penalty
    loss_eval = lasagne.objectives.categorical_crossentropy(prediction_eval, target_var_flatten)
    loss_eval = (loss_eval * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
    # compute number of correct labels (masked so padding positions do not count)
    corr_train = lasagne.objectives.categorical_accuracy(prediction_train, target_var_flatten)
    corr_train = (corr_train * mask_var_flatten).sum(dtype=theano.config.floatX)
    corr_eval = lasagne.objectives.categorical_accuracy(prediction_eval, target_var_flatten)
    corr_eval = (corr_eval * mask_var_flatten).sum(dtype=theano.config.floatX)
    # Create update expressions for training.
    # hyper parameters to tune: learning rate, momentum, regularization.
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    decay_rate = args.decay_rate
    momentum = 0.9
    params = lasagne.layers.get_all_params(layer_output, trainable=True)
    updates = utils.create_updates(loss_train, params, update_algo, learning_rate, momentum=momentum)
    # Compile a function performing a training step on a mini-batch
    train_fn = theano.function([input_var, target_var, mask_var], [loss_train, corr_train, num_loss], updates=updates)
    # Compile a second function evaluating the loss and accuracy of network
    eval_fn = theano.function([input_var, target_var, mask_var], [loss_eval, corr_eval, num_loss, final_prediction])
    # Finally, launch the training loop.
    logger.info(
        "Start training: %s with regularization: %s(%f), dropout: %s, fine tune: %s (#training data: %d, batch size: %d, clip: %.1f)..." \
        % (
            update_algo, regular, (0.0 if regular == 'none' else gamma), dropout, fine_tune, num_data, batch_size, grad_clipping))
    # Python 2 integer division: num_batches is only used for ETA estimation
    num_batches = num_data / batch_size
    num_epochs = 1000
    # best-so-far trackers; "loss" and "acc" criteria are tracked independently
    best_loss = 1e+12
    best_acc = 0.0
    best_epoch_loss = 0
    best_epoch_acc = 0
    best_loss_test_err = 0.
    best_loss_test_corr = 0.
    best_acc_test_err = 0.
    best_acc_test_corr = 0.
    stop_count = 0
    lr = learning_rate
    patience = 5
    for epoch in range(1, num_epochs + 1):
        print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, decay_rate)
        train_err = 0.0
        train_corr = 0.0
        train_total = 0
        start_time = time.time()
        num_back = 0
        train_batches = 0
        for batch in utils.iterate_minibatches(X_train, Y_train, masks=mask_train, batch_size=batch_size, shuffle=True):
            inputs, targets, masks, _ = batch
            err, corr, num = train_fn(inputs, targets, masks)
            # err is a per-token mean, so weight it back by token count
            train_err += err * num
            train_corr += corr
            train_total += num
            train_batches += 1
            time_ave = (time.time() - start_time) / train_batches
            time_left = (num_batches - train_batches) * time_ave
            # update log: overwrite the previous progress line in place with backspaces
            sys.stdout.write("\b" * num_back)
            log_info = 'train: %d/%d loss: %.4f, acc: %.2f%%, time left (estimated): %.2fs' % (
                min(train_batches * batch_size, num_data), num_data,
                train_err / train_total, train_corr * 100 / train_total, time_left)
            sys.stdout.write(log_info)
            num_back = len(log_info)
        # update training log after each epoch
        sys.stdout.write("\b" * num_back)
        print 'train: %d/%d loss: %.4f, acc: %.2f%%, time: %.2fs' % (
            min(train_batches * batch_size, num_data), num_data,
            train_err / train_total, train_corr * 100 / train_total, time.time() - start_time)
        # evaluate performance on dev data
        dev_err = 0.0
        dev_corr = 0.0
        dev_total = 0
        for batch in utils.iterate_minibatches(X_dev, Y_dev, masks=mask_dev, batch_size=batch_size):
            inputs, targets, masks, _ = batch
            err, corr, num, predictions = eval_fn(inputs, targets, masks)
            dev_err += err * num
            dev_corr += corr
            dev_total += num
            if output_predict:
                utils.output_predictions(predictions, targets, masks, 'tmp/dev%d' % epoch, label_alphabet)
        print 'dev loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
            dev_err / dev_total, dev_corr, dev_total, dev_corr * 100 / dev_total)
        if best_loss < dev_err and best_acc > dev_corr / dev_total:
            # no improvement on either criterion: count towards early stopping
            stop_count += 1
        else:
            update_loss = False
            update_acc = False
            stop_count = 0
            if best_loss > dev_err:
                update_loss = True
                best_loss = dev_err
                best_epoch_loss = epoch
            if best_acc < dev_corr / dev_total:
                update_acc = True
                best_acc = dev_corr / dev_total
                best_epoch_acc = epoch
            # evaluate on test data when better performance detected
            test_err = 0.0
            test_corr = 0.0
            test_total = 0
            for batch in utils.iterate_minibatches(X_test, Y_test, masks=mask_test, batch_size=batch_size):
                inputs, targets, masks, _ = batch
                err, corr, num, predictions = eval_fn(inputs, targets, masks)
                test_err += err * num
                test_corr += corr
                test_total += num
                if output_predict:
                    utils.output_predictions(predictions, targets, masks, 'tmp/test%d' % epoch, label_alphabet)
            print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
                test_err / test_total, test_corr, test_total, test_corr * 100 / test_total)
            if update_loss:
                best_loss_test_err = test_err
                best_loss_test_corr = test_corr
            if update_acc:
                best_acc_test_err = test_err
                best_acc_test_corr = test_corr
        # stop early once dev performance has failed to improve `patience` epochs in a row
        if stop_count == patience:
            break
        # re-compile a function with new learning rate for training
        # (updates capture lr by value, so a fresh theano.function is needed)
        lr = learning_rate / (1.0 + epoch * decay_rate)
        updates = utils.create_updates(loss_train, params, update_algo, lr, momentum=momentum)
        train_fn = theano.function([input_var, target_var, mask_var], [loss_train, corr_train, num_loss],
                                   updates=updates)
    # print best performance on test data.
    logger.info("final best loss test performance (at epoch %d)" % best_epoch_loss)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_loss_test_err / test_total, best_loss_test_corr, test_total, best_loss_test_corr * 100 / test_total)
    logger.info("final best acc test performance (at epoch %d)" % best_epoch_acc)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_acc_test_err / test_total, best_acc_test_corr, test_total, best_acc_test_corr * 100 / test_total)
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 13,596
| 47.734767
| 138
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/__init__.py
|
__author__ = 'max'
| 19
| 9
| 18
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/networks/parser.py
|
__author__ = 'max'
import theano.tensor as T
from lasagne.layers import MergeLayer
from lasagne import init
# Public API of this module.
__all__ = [
    "DepParserLayer",
]
class DepParserLayer(MergeLayer):
    """Pairwise arc-scoring layer for dependency parsing.

    Given per-token features of shape ``(batch, length, num_inputs)``, this
    layer scores every (head, child, label) triple by summing a head-side
    and a child-side linear projection, producing a tensor of shape
    ``(batch, length, length, num_labels)``.
    """
    def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
                 b=init.Constant(0.), **kwargs):
        """
        :param incoming: layer producing ``(batch, length, num_inputs)`` features
        :param num_labels: number of dependency labels to score
        :param mask_input: optional layer providing a ``(batch, length)`` mask
        :param W_h: initializer for the head projection weights
        :param W_c: initializer for the child projection weights
        :param b: initializer for the bias, or ``None`` for no bias
        """
        # Inherit from MergeLayer so the optional mask can arrive as a
        # second input alongside the features.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1
        super(DepParserLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        num_inputs = self.input_shape[2]
        # head-side and child-side projection matrices plus shared bias
        self.W_h = self.add_param(W_h, (num_inputs, self.num_labels), name='W_h')
        self.W_c = self.add_param(W_c, (num_inputs, self.num_labels), name='W_c')
        self.b = None if b is None else self.add_param(b, (self.num_labels,), name='b', regularizable=False)
    def get_output_shape_for(self, input_shapes):
        """Return ``(batch, length, length, num_labels)`` for the first input shape."""
        shape = input_shapes[0]
        return shape[0], shape[1], shape[1], self.num_labels
    def get_output_for(self, inputs, **kwargs):
        """Score all (head, child, label) triples.

        ``inputs[0]`` is the feature tensor; when the layer was built with
        ``mask_input``, ``inputs[1]`` is a ``(batch, length)`` 0/1 mask and
        scores of masked positions are zeroed on both axes.

        :return: symbolic tensor of shape ``(batch, length, length, num_labels)``
        """
        features = inputs[0]
        mask = inputs[self.mask_incoming_index] if self.mask_incoming_index > 0 else None
        # head scores: [batch, length, input] x [input, num_label] -> [batch, length, num_label]
        head = T.tensordot(features, self.W_h, axes=[[2], [0]])
        if self.b is not None:
            head = head + self.b.dimshuffle('x', 'x', 0)
        # child scores, same shape as the head scores
        child = T.tensordot(features, self.W_c, axes=[[2], [0]])
        # broadcast head along the child axis and child along the head axis
        shape = features.shape
        scores = T.cast(T.alloc(0.0, shape[0], shape[1], shape[1], self.num_labels), 'floatX')
        scores = scores + head.dimshuffle(0, 1, 'x', 2)
        scores = scores + child.dimshuffle(0, 'x', 1, 2)
        if mask is not None:
            # zero out padded positions on both the head and the child axis
            for axes in ((0, 1, 'x', 'x'), (0, 'x', 1, 'x')):
                scores = scores * mask.dimshuffle(*axes)
        return scores
| 3,727
| 36.656566
| 113
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/networks/highway.py
|
__author__ = 'max'
import theano.tensor as T
import numpy as np
from lasagne import init
from lasagne.layers import Layer
import lasagne.nonlinearities as nonlinearities
# Public API of this module.
__all__ = [
    "HighwayDenseLayer",
]
class HighwayDenseLayer(Layer):
    """Fully-connected highway layer (Srivastava et al., 2015).

    Computes ``t * g(x W_h + b_h) + (1 - t) * x`` where
    ``t = sigmoid(x W_t + b_t)`` is the transform gate and ``g`` is the
    configured nonlinearity.  Because the carry path mixes the raw input
    back in, the number of outputs equals the number of inputs, so both
    weight matrices are square ``(num_inputs, num_inputs)``.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    W_h, b_h : initializers for the transform path weights and bias
        (``b_h=None`` disables the bias).
    W_t, b_t : initializers for the gate weights and bias
        (``b_t=None`` disables the bias).
    nonlinearity : callable or None
        Applied to the transform path; ``None`` means linear.
    """
    def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = nonlinearities.identity if nonlinearity is None else nonlinearity
        # trailing dims are flattened, so the layer works on >2-d inputs too
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        self.b_h = None if b_h is None else self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)
        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        self.b_t = None if b_t is None else self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
    def get_output_shape_for(self, input_shape):
        # highway layers are shape-preserving by construction
        return input_shape
    def get_output_for(self, input, **kwargs):
        """Return ``transform * H(x) + carry * x`` with ``carry = 1 - transform``."""
        # flatten anything beyond the batch axis into a feature vector
        flat = input if input.ndim <= 2 else input.flatten(2)
        def _affine(weight, bias):
            # shared linear map used by both the transform path and the gate
            out = T.dot(flat, weight)
            if bias is not None:
                out = out + bias.dimshuffle('x', 0)
            return out
        h = self.nonlinearity(_affine(self.W_h, self.b_h))
        t = nonlinearities.sigmoid(_affine(self.W_t, self.b_t))
        gated = h * t + flat * (1.0 - t)
        # restore the original (unflattened) shape if we flattened above
        if input.ndim > 2:
            gated = T.reshape(gated, input.shape)
        return gated
| 3,869
| 39.736842
| 109
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/networks/networks.py
|
__author__ = 'max'
import lasagne
import lasagne.nonlinearities as nonlinearities
from lasagne.layers import Gate
from lasagne_nlp.networks.crf import CRFLayer
from lasagne_nlp.networks.highway import HighwayDenseLayer
def build_BiRNN(incoming, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                precompute_input=True, dropout=True, in_to_out=False):
    """Build a bi-directional vanilla RNN over `incoming`.

    Both directions use Glorot-uniform initialised weights; their outputs are
    concatenated along the feature axis, giving shape
    ``(batch_size, input_length, 2 * num_units)``.  Dropout (p=0.5) is applied
    to the input and to the concatenated output when `dropout` is True; when
    `in_to_out` is True the (possibly dropped-out) input is concatenated onto
    the output as well.
    """
    if dropout:
        incoming = lasagne.layers.DropoutLayer(incoming, p=0.5)
    def _direction(go_backwards, layer_name):
        # one RNN per direction; fresh initializers are created on each call
        return lasagne.layers.RecurrentLayer(incoming, num_units,
                                             mask_input=mask, grad_clipping=grad_clipping,
                                             nonlinearity=nonlinearity, precompute_input=precompute_input,
                                             W_in_to_hid=lasagne.init.GlorotUniform(),
                                             W_hid_to_hid=lasagne.init.GlorotUniform(),
                                             backwards=go_backwards, name=layer_name)
    bi_rnn = lasagne.layers.concat([_direction(False, 'forward'), _direction(True, 'backward')],
                                   axis=2, name="bi-rnn")
    if dropout:
        bi_rnn = lasagne.layers.DropoutLayer(bi_rnn, p=0.5)
    if in_to_out:
        bi_rnn = lasagne.layers.concat([bi_rnn, incoming], axis=2)
    return bi_rnn
def build_BiLSTM(incoming, num_units, mask=None, grad_clipping=0, precompute_input=True, peepholes=False, dropout=True,
                 in_to_out=False):
    """Build a bi-directional LSTM over `incoming`.

    Gate weights are Glorot-uniform, peephole weights uniform in [-0.1, 0.1],
    and the forget-gate bias starts at 1 (Jozefowicz et al., 2015).  The two
    directions are concatenated along the feature axis, giving shape
    ``(batch_size, input_length, 2 * num_units)``.  Dropout (p=0.5) wraps the
    input and the concatenated output when `dropout` is True; `in_to_out`
    additionally concatenates the (possibly dropped-out) input to the output.
    """
    if dropout:
        incoming = lasagne.layers.DropoutLayer(incoming, p=0.5)
    def _fresh_gates():
        # Build one full set of gates; called once per direction so each
        # direction gets its own initializer objects (as in the original).
        ingate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                      W_cell=lasagne.init.Uniform(range=0.1))
        outgate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                       W_cell=lasagne.init.Uniform(range=0.1))
        # forget-gate bias initialised to 1 per Jozefowicz et al. (2015)
        forgetgate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                          W_cell=lasagne.init.Uniform(range=0.1), b=lasagne.init.Constant(1.))
        # cell candidate: tanh nonlinearity, no peephole connection
        cell = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None,
                    nonlinearity=nonlinearities.tanh)
        return ingate, outgate, forgetgate, cell
    def _lstm(go_backwards, layer_name):
        ingate, outgate, forgetgate, cell = _fresh_gates()
        return lasagne.layers.LSTMLayer(incoming, num_units, mask_input=mask, grad_clipping=grad_clipping,
                                        nonlinearity=nonlinearities.tanh, peepholes=peepholes,
                                        precompute_input=precompute_input,
                                        ingate=ingate, outgate=outgate,
                                        forgetgate=forgetgate, cell=cell,
                                        backwards=go_backwards, name=layer_name)
    concat = lasagne.layers.concat([_lstm(False, 'forward'), _lstm(True, 'backward')], axis=2, name="bi-lstm")
    if dropout:
        concat = lasagne.layers.DropoutLayer(concat, p=0.5)
    if in_to_out:
        concat = lasagne.layers.concat([concat, incoming], axis=2)
    return concat
def build_BiGRU(incoming, num_units, mask=None, grad_clipping=0, precompute_input=True, dropout=True, in_to_out=False):
    """Build a bi-directional GRU over `incoming`.

    Gate weights are Glorot-uniform; the reset-gate bias starts at 1
    (Jozefowicz et al., 2015) and the hidden update uses tanh.  The two
    directions are concatenated along the feature axis, giving shape
    ``(batch_size, input_length, 2 * num_units)``.  Dropout (p=0.5) wraps the
    input and the concatenated output when `dropout` is True; `in_to_out`
    additionally concatenates the (possibly dropped-out) input to the output.
    """
    if dropout:
        incoming = lasagne.layers.DropoutLayer(incoming, p=0.5)
    def _gru(go_backwards, layer_name):
        # fresh gate objects per direction, matching the original construction
        # reset-gate bias initialised to 1 per Jozefowicz et al. (2015)
        resetgate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                         W_cell=lasagne.init.Uniform(range=0.1), b=lasagne.init.Constant(1.))
        updategate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                          W_cell=lasagne.init.Uniform(range=0.1))
        # hidden update uses tanh and no peephole-style cell weight
        hidden = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None,
                      nonlinearity=nonlinearities.tanh)
        return lasagne.layers.GRULayer(incoming, num_units, mask_input=mask, grad_clipping=grad_clipping,
                                       precompute_input=precompute_input,
                                       resetgate=resetgate, updategate=updategate,
                                       hidden_update=hidden, backwards=go_backwards, name=layer_name)
    concat = lasagne.layers.concat([_gru(False, 'forward'), _gru(True, 'backward')], axis=2, name="bi-gru")
    if dropout:
        concat = lasagne.layers.DropoutLayer(concat, p=0.5)
    if in_to_out:
        concat = lasagne.layers.concat([concat, incoming], axis=2)
    return concat
def build_BiRNN_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                    precompute_input=True, num_filters=20, dropout=True, in_to_out=False):
    """Character-CNN + word-embedding features fed into a bi-directional RNN.

    `incoming1` carries per-word character sequences (conv over window 3 with
    max-over-time pooling yields `num_filters` features per word); `incoming2`
    carries word-level features of shape ``(batch, sent_length, dim)``.  The
    two are concatenated and passed to :func:`build_BiRNN`.
    """
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape
    # dropout before the character convolution
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
    cnn = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                     nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # pool over the full (padded) time axis -> one vector per word
    _, _, pool_size = cnn.output_shape
    pooled = lasagne.layers.MaxPool1DLayer(cnn, pool_size=pool_size)
    # [batch * sent_length, num_filters, 1] -> [batch, sent_length, num_filters]
    char_features = lasagne.layers.reshape(pooled, (-1, sent_length, [1]))
    merged = lasagne.layers.concat([char_features, incoming2], axis=2)
    return build_BiRNN(merged, num_units, mask=mask, grad_clipping=grad_clipping, nonlinearity=nonlinearity,
                       precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
def build_BiLSTM_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                     peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    """Character-CNN + word-embedding features fed into a bi-directional LSTM.

    `incoming1` carries per-word character sequences (conv over window 3 with
    max-over-time pooling yields `num_filters` features per word); `incoming2`
    carries word-level features of shape ``(batch, sent_length, dim)``.  The
    two are concatenated and passed to :func:`build_BiLSTM`.
    """
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape
    # dropout before the character convolution
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
    cnn = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                     nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # pool over the full (padded) time axis -> one vector per word
    _, _, pool_size = cnn.output_shape
    pooled = lasagne.layers.MaxPool1DLayer(cnn, pool_size=pool_size)
    # [batch * sent_length, num_filters, 1] -> [batch, sent_length, num_filters]
    char_features = lasagne.layers.reshape(pooled, (-1, sent_length, [1]))
    merged = lasagne.layers.concat([char_features, incoming2], axis=2)
    return build_BiLSTM(merged, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
def build_BiGRU_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                    num_filters=20, dropout=True, in_to_out=False):
    """Character-CNN + word-embedding features fed into a bi-directional GRU.

    `incoming1` carries per-word character sequences (conv over window 3 with
    max-over-time pooling yields `num_filters` features per word); `incoming2`
    carries word-level features of shape ``(batch, sent_length, dim)``.  The
    two are concatenated and passed to :func:`build_BiGRU`.
    """
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape
    # dropout before the character convolution
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
    cnn = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                     nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # pool over the full (padded) time axis -> one vector per word
    _, _, pool_size = cnn.output_shape
    pooled = lasagne.layers.MaxPool1DLayer(cnn, pool_size=pool_size)
    # [batch * sent_length, num_filters, 1] -> [batch, sent_length, num_filters]
    char_features = lasagne.layers.reshape(pooled, (-1, sent_length, [1]))
    merged = lasagne.layers.concat([char_features, incoming2], axis=2)
    return build_BiGRU(merged, num_units, mask=mask, grad_clipping=grad_clipping,
                       precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
def build_BiLSTM_CNN_CRF(incoming1, incoming2, num_units, num_labels, mask=None, grad_clipping=0, precompute_input=True,
                         peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    """BiLSTM-CNN encoder (see :func:`build_BiLSTM_CNN`) topped with a CRF output layer."""
    encoder = build_BiLSTM_CNN(incoming1, incoming2, num_units, mask=mask, grad_clipping=grad_clipping,
                               precompute_input=precompute_input, peepholes=peepholes,
                               num_filters=num_filters, dropout=dropout, in_to_out=in_to_out)
    # the CRF scores label transitions over the encoder features
    return CRFLayer(encoder, num_labels, mask_input=mask)
def build_BiLSTM_HighCNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                         peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    """Character CNN + highway layer + word features into a bi-directional LSTM.

    Like :func:`build_BiLSTM_CNN`, but the pooled character features pass
    through a :class:`HighwayDenseLayer` (rectify nonlinearity) before being
    concatenated with the word-level features from `incoming2`.
    """
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape
    # dropout before the character convolution
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
    cnn = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                     nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # pool over the full (padded) time axis -> one vector per word
    _, _, pool_size = cnn.output_shape
    pooled = lasagne.layers.MaxPool1DLayer(cnn, pool_size=pool_size)
    # [batch * sent_length, num_filters, 1] -> [batch * sent_length, num_filters] for the highway layer
    flat_chars = lasagne.layers.reshape(pooled, ([0], -1))
    highway = HighwayDenseLayer(flat_chars, nonlinearity=nonlinearities.rectify)
    # [batch * sent_length, num_filters] -> [batch, sent_length, num_filters]
    char_features = lasagne.layers.reshape(highway, (-1, sent_length, [1]))
    merged = lasagne.layers.concat([char_features, incoming2], axis=2)
    return build_BiLSTM(merged, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
def build_BiLSTM_HighCNN_CRF(incoming1, incoming2, num_units, num_labels, mask=None, grad_clipping=0,
                             precompute_input=True, peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    """BiLSTM-highway-CNN encoder (see :func:`build_BiLSTM_HighCNN`) topped with a CRF output layer."""
    encoder = build_BiLSTM_HighCNN(incoming1, incoming2, num_units, mask=mask, grad_clipping=grad_clipping,
                                   precompute_input=precompute_input, peepholes=peepholes,
                                   num_filters=num_filters, dropout=dropout, in_to_out=in_to_out)
    # the CRF scores label transitions over the encoder features
    return CRFLayer(encoder, num_labels, mask_input=mask)
| 16,251
| 56.631206
| 132
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/networks/graph.py
|
__author__ = 'max'
import theano.tensor as T
from lasagne.layers import MergeLayer
from lasagne import init
import lasagne.nonlinearities as nonlinearities
from theano.tensor.sort import argsort
# Public API of this module.
__all__ = [
    "GraphConvLayer",
]
class GraphConvLayer(MergeLayer):
    """
    lasagne_nlp.networks.graph.GraphConvLayer(incoming_vertex, incoming_edge, num_filters, filter_size,
    W=init.GlorotUniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs)

    Graph convolution: for every vertex, gathers the features of the
    ``filter_size`` vertices with the smallest edge value under each distance
    metric, and applies a learned linear filter to the concatenation.

    Parameters
    ----------
    incoming_vertex : a :class:`lasagne.layers.Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
        The output of this layer should be a 3D tensor with shape
        ``(batch_size, number_input_channels, number_vertex)``
    incoming_edge : a :class:`lasagne.layers.Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
        The output of this layer should be a 4D tensor with shape
        ``(batch_size, number_distance_metric, number_vertex, number_vertex)``
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or an `n`-element tuple specifying the size of the filters.
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a tensor with shape ``(number_distance_metric * filter_size * number_input_channels, num_filters)``,
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)``
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.
    """
    def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        # Remember the static shapes of both inputs; the edge shape is needed
        # later to size W and to read the number of distance metrics.
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        # NOTE(review): stored before MergeLayer.__init__ runs; presumably
        # kept so code written against single-input layers can still read
        # ``input_shape`` -- confirm against callers.
        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size
        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            # Biases are per output filter and excluded from regularization.
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)

    def get_W_shape(self):
        """Get the shape of the weight matrix `W`.

        Returns
        -------
        tuple of int
            The shape of the weight matrix:
            ``(num_dist_metrics * num_input_channels * filter_size, num_filters)``.
        """
        num_input_channels = self.vertex_shape[1]
        num_dist_metrics = self.edge_shape[1]
        return num_dist_metrics * num_input_channels * self.filter_size, self.num_filters

    def get_output_shape_for(self, input_shapes):
        # Output keeps the vertex count but replaces input channels with
        # ``num_filters``: (batch, num_filters, num_vertex).
        vertex_shape = input_shapes[self.vertex_incoming_index]
        return vertex_shape[0], self.num_filters, vertex_shape[2]

    def get_output_for(self, inputs, **kwargs):
        """
        Compute this layer's output function given a symbolic input variable.

        Parameters
        ----------
        :param inputs: list of theano.TensorType
            `inputs[0]` should always be the symbolic vertex variable.
            `inputs[1]` should always be the symbolic edge variable.
        :return: theano.TensorType
            Symbolic output variable.
        """
        vertex = inputs[self.vertex_incoming_index]
        # shuffle vertex to shape [batch, n, channel]
        vertex = vertex.dimshuffle(0, 2, 1)
        # get each dimension
        vertex_shape = vertex.shape
        batch_size = vertex_shape[0]
        num_vertex = vertex_shape[1]
        num_channel = vertex_shape[2]
        num_dist_metrics = self.edge_shape[1]
        filter_size = self.filter_size
        num_filters = self.num_filters
        # vertex_conv shape [batch, n, n, channel]: broadcast every vertex's
        # feature vector to each of the n "query" positions
        vertex_conv = T.cast(T.alloc(0.0, batch_size, num_vertex, num_vertex, num_channel), 'floatX')
        vertex_conv = vertex_conv + vertex.dimshuffle(0, 'x', 1, 2)
        # reshape vertex_conv to [batch * n, n, channel]
        vertex_conv = T.reshape(vertex_conv, (batch_size * num_vertex, num_vertex, num_channel))

        edge = inputs[self.edge_incoming_index]
        edge_sorted_indices = argsort(edge, axis=3)
        # keep the FIRST filter_size indices of the ascending sort, i.e. the
        # k vertices with the smallest edge value per metric (presumably the
        # nearest neighbours; the slice below takes [:filter_size]).
        # the shape of edge_sorted_indices is [batch, d, n, k]
        edge_sorted_indices = edge_sorted_indices[:, :, :, :filter_size]
        # shuffle indices to shape [batch, n, d, k]
        edge_sorted_indices = edge_sorted_indices.dimshuffle(0, 2, 1, 3)
        # reshape indices to shape [batch * n, d * k]
        edge_sorted_indices = T.reshape(edge_sorted_indices, (batch_size * num_vertex, num_dist_metrics * filter_size))
        # advanced indexing gathers neighbour features;
        # conv has shape [d * k, batch * n, channel]
        conv = vertex_conv[T.arange(batch_size * num_vertex), edge_sorted_indices.T, :]
        # shuffle conv to [batch * n, d * k, channel]
        conv = conv.dimshuffle(1, 0, 2)
        # reshape conv to [batch * n, d * k * channel]
        conv = T.reshape(conv, (batch_size * num_vertex, num_dist_metrics * filter_size * num_channel))
        # dot conv with W: [batch * n, d * k * channel] x [d * k * channel, num_filters] = [batch * n, num_filters]
        activation = T.dot(conv, self.W)
        if self.b is not None:
            activation = activation + self.b.dimshuffle('x', 0)
        # apply nonlinear function
        activation = self.nonlinearity(activation)
        # reshape activation back to [batch, n, num_filters]
        activation = T.reshape(activation, (batch_size, num_vertex, num_filters))
        # shuffle it to [batch, num_filters, n]
        return activation.dimshuffle(0, 2, 1)
| 6,385
| 43.657343
| 124
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/networks/dropout.py
|
__author__ = 'max'
import theano
from lasagne.random import get_rng
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from lasagne.layers import Layer
__all__ = [
"GaussianDropoutLayer",
]
class GaussianDropoutLayer(Layer):
    """Gaussian Dropout layer.

    Multiplies its input elementwise by Gaussian noise with mean 1.0 and
    standard deviation ``sigma`` (the value is passed to ``normal(std=...)``,
    so it is a standard deviation, not a variance).  See
    :meth:`get_output_for` for disabling the noise during testing.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        the layer feeding into this layer, or the expected input shape
    sigma : float or scalar tensor
        standard deviation of the multiplicative Gaussian noise
    """
    def __init__(self, incoming, sigma=1.0, **kwargs):
        super(GaussianDropoutLayer, self).__init__(incoming, **kwargs)
        # Theano MRG random stream, seeded from Lasagne's global RNG so that
        # runs are reproducible when the global seed is fixed.
        self._srng = RandomStreams(get_rng().randint(1, 2147462579))
        self.sigma = sigma

    def get_output_for(self, input, deterministic=False, **kwargs):
        """
        Parameters
        ----------
        input : tensor
            output from the previous layer
        deterministic : bool
            If true the noise is disabled and the input is returned
            unchanged (use this at evaluation/test time).
        """
        # sigma == 0 means no noise at all, so skip the sampling entirely.
        if deterministic or self.sigma == 0:
            return input
        else:
            # use nonsymbolic shape for dropout mask if possible
            input_shape = self.input_shape
            if any(s is None for s in input_shape):
                input_shape = input.shape
            return input * self._srng.normal(input_shape, avg=1.0, std=self.sigma, dtype=theano.config.floatX)


gaussian_dropout = GaussianDropoutLayer  # shortcut alias
| 1,635
| 30.461538
| 116
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/networks/crf.py
|
__author__ = 'max'
import theano.tensor as T
from lasagne import init
from lasagne.layers import MergeLayer
class CRFLayer(MergeLayer):
    """
    lasagne_nlp.networks.crf.CRFLayer(incoming, num_labels,
    mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs)

    Produces, for every time step, a matrix of pairwise label potentials
    (previous label x current label) computed from the input features --
    the scores of a linear-chain CRF.  One extra label is reserved
    internally for padding (``pad_label_index``).

    Parameters
    ----------
    incoming : a :class:`lasagne.layers.Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
        The output of this layer should be a 3D tensor with shape
        ``(batch_size, input_length, num_input_features)``
    num_labels : int
        The number of labels of the crf layer
    mask_input : :class:`lasagne.layers.Layer`
        Layer which allows for a sequence mask to be input, for when sequences
        are of variable length. Default `None`, which means no mask will be
        supplied (i.e. all sequences are of the same length).
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a tensor with shape
        ``(num_inputs, num_labels + 1, num_labels + 1)``, where ``num_inputs``
        is the size of the last dimension of the input.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a matrix with shape ``(num_labels + 1, num_labels + 1)``.
    """
    def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        # -1 means "no mask layer"; see get_output_for.
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(CRFLayer, self).__init__(incomings, **kwargs)
        # Reserve one extra label used as the padding label.
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        # W maps each input feature to a (prev-label x label) score matrix.
        self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")

        if b is None:
            self.b = None
        else:
            # Input-independent transition bias between label pairs.
            self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)

    def get_output_shape_for(self, input_shapes):
        # (batch, time, prev_label, label)
        input_shape = input_shapes[0]
        return input_shape[0], input_shape[1], self.num_labels, self.num_labels

    def get_output_for(self, inputs, **kwargs):
        """
        Compute this layer's output function given a symbolic input variable.

        Parameters
        ----------
        :param inputs: list of theano.TensorType
            `inputs[0]` should always be the symbolic input variable. When
            this layer has a mask input (i.e. was instantiated with
            `mask_input != None`, indicating that the lengths of sequences in
            each batch vary), `inputs` should have length 2, where `inputs[1]`
            is the `mask`. The `mask` should be supplied as a Theano variable
            denoting whether each time step in each sequence in the batch is
            part of the sequence or not. `mask` should be a matrix of shape
            ``(n_batch, n_time_steps)`` where ``mask[i, j] = 1`` when ``j <=
            (length of sequence i)`` and ``mask[i, j] = 0`` when ``j > (length
            of sequence i)``.
        :return: theano.TensorType
            Symbolic output variable.
        """
        input = inputs[0]
        mask = None
        if self.mask_incoming_index > 0:
            mask = inputs[self.mask_incoming_index]

        # compute out by tensor dot ([batch, length, input] * [input, num_label, num_label])
        # the shape of out should be [batch, length, num_label, num_label]
        out = T.tensordot(input, self.W, axes=[[2], [0]])

        if self.b is not None:
            # broadcast the bias matrix over batch and time
            b_shuffled = self.b.dimshuffle('x', 'x', 0, 1)
            out = out + b_shuffled

        if mask is not None:
            # zero out the scores of padded time steps
            mask_shuffled = mask.dimshuffle(0, 1, 'x', 'x')
            out = out * mask_shuffled
        return out
| 4,409
| 42.235294
| 117
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/networks/__init__.py
|
__author__ = 'max'
| 19
| 9
| 18
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/theano/__init__.py
|
__author__ = 'max'
| 19
| 9
| 18
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/theano/nlinalg.py
|
from __future__ import print_function
__author__ = 'max'
import numpy
import theano
from theano.tensor import as_tensor_variable
from theano.gof import Op, Apply
from theano.tensor.nlinalg import matrix_inverse
class LogAbsDet(Op):
    """
    Computes the logarithm of the absolute determinant of a square
    matrix M, log(abs(det(M))), on CPU.  Working through the singular
    values avoids the overflow/underflow that computing det(M) directly
    can hit.
    TODO: add GPU code!
    """
    def make_node(self, x):
        x = as_tensor_variable(x)
        # Only a single 2-D matrix is supported (no batching).
        assert x.ndim == 2
        o = theano.tensor.scalar(dtype=x.dtype)
        return Apply(self, [x], [o])

    def perform(self, node, inputs, outputs, params=None):
        # MAX = 10000.
        # MIN = -10000.
        try:
            (x,) = inputs
            (z,) = outputs
            # log|det(M)| = sum of log singular values (numerically stable).
            s = numpy.linalg.svd(x, compute_uv=False)
            log_abs_det = numpy.sum(numpy.log(numpy.abs(s)))
            # numpy.clip(log_abs_det, MIN, MAX)
            z[0] = numpy.asarray(log_abs_det, dtype=x.dtype)
        except Exception:
            # Log the offending matrix before re-raising so the failure
            # is diagnosable from the output.
            print('Failed to compute logabsdet of {}.'.format(x))
            raise

    def grad(self, inputs, g_outputs):
        # d/dM log|det(M)| = (M^{-1})^T
        [gz] = g_outputs
        [x] = inputs
        return [gz * matrix_inverse(x).T]

    def __str__(self):
        return "LogAbsDet"


logabsdet = LogAbsDet()
| 1,320
| 24.403846
| 75
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/utils/regularization.py
|
__author__ = 'max'
import theano.tensor as T
import theano.tensor.nlinalg as nlinalg
def dima(x):
    """Regularization term: negative log-determinant of the Gram matrix.

    Computes ``-log(det(x^T x))``.  NOTE(review): the intent appears to be
    encouraging diversity/non-degeneracy of the columns of ``x`` -- confirm
    against the training scripts that use it.
    """
    gram = T.dot(x.T, x)
    determinant = nlinalg.Det()(gram)
    return -T.log(determinant)
| 148
| 17.625
| 47
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/utils/utils.py
|
__author__ = 'max'
import logging
import sys
import numpy as np
import lasagne
from gensim.models.word2vec import Word2Vec
import gzip
import theano
def get_logger(name, level=logging.INFO, handler=sys.stdout,
               formatter='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
    """Create and return a logger writing to *handler*.

    :param name: logger name (loggers are process-wide singletons per name).
    :param level: logging level applied to the logger AND its handler.
    :param handler: stream the handler writes to (default: ``sys.stdout``).
    :param formatter: log-record format string.
    :return: the configured :class:`logging.Logger`.
    """
    logger = logging.getLogger(name)
    # Bug fix: the logger itself was previously hard-coded to INFO, so a
    # caller passing level=logging.DEBUG never saw DEBUG records (they were
    # filtered by the logger before reaching the handler).
    logger.setLevel(level)
    stream_handler = logging.StreamHandler(handler)
    stream_handler.setLevel(level)
    stream_handler.setFormatter(logging.Formatter(formatter))
    # NOTE(review): calling this twice with the same name attaches a second
    # handler and duplicates output; kept as-is to preserve behavior.
    logger.addHandler(stream_handler)
    return logger
def load_word_embedding_dict(embedding, embedding_path, word_alphabet, logger, embedd_dim=100):
    """
    load word embeddings from file

    :param embedding: one of 'word2vec', 'glove', 'senna', 'random'.
    :param embedding_path: path to the embedding file (unused for 'random').
    :param word_alphabet: vocabulary alphabet; only its ``get_content()`` is
        used, and only by the 'random' branch.
    :param logger: logger for progress messages.
    :param embedd_dim: dimension for 'random' embeddings (inferred from the
        file for the other choices).
    :return: embedding dict (or Word2Vec model), embedding dimension, caseless
    :raises ValueError: if ``embedding`` is not a recognized choice.
    """
    def read_embedding_file(name):
        # GloVe and Senna ship in the same gzipped text format (one token per
        # line followed by its vector), so both branches share this reader.
        logger.info("Loading %s ..." % name)
        dim = -1
        table = dict()
        with gzip.open(embedding_path, 'r') as f:
            for line in f:
                line = line.strip()
                if len(line) == 0:
                    continue
                tokens = line.split()
                if dim < 0:
                    # first data line fixes the embedding dimension
                    dim = len(tokens) - 1
                else:
                    assert (dim + 1 == len(tokens))
                embedd = np.empty([1, dim], dtype=theano.config.floatX)
                embedd[:] = tokens[1:]
                table[tokens[0]] = embedd
        # these text embeddings are treated as caseless
        return table, dim, True

    if embedding == 'word2vec':
        # loading word2vec
        logger.info("Loading word2vec ...")
        word2vec = Word2Vec.load_word2vec_format(embedding_path, binary=True)
        return word2vec, word2vec.vector_size, False
    elif embedding == 'glove':
        return read_embedding_file("GloVe")
    elif embedding == 'senna':
        return read_embedding_file("Senna")
    elif embedding == 'random':
        # uniform random embeddings for every word in the alphabet
        logger.info("Loading Random ...")
        embedd_dict = dict()
        scale = np.sqrt(3.0 / embedd_dim)
        for word in word_alphabet.get_content():
            embedd_dict[word] = np.random.uniform(-scale, scale, [1, embedd_dim])
        return embedd_dict, embedd_dim, False
    else:
        # Bug fix: the message previously omitted 'glove' and 'random'.
        raise ValueError("embedding should choose from [word2vec, glove, senna, random]")
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
def iterate_minibatches(inputs, targets, masks=None, char_inputs=None, batch_size=10, shuffle=False):
    """Yield mini-batches of (inputs, targets, masks, char_inputs).

    Iterates the aligned arrays in chunks of ``batch_size`` (the final batch
    may be smaller).  When ``shuffle`` is true the order is randomized via a
    shared permutation so all four arrays stay aligned.  ``masks`` and
    ``char_inputs`` are optional; ``None`` is yielded in their place when
    they are not supplied.
    """
    n = len(inputs)
    assert n == len(targets)
    if masks is not None:
        assert n == len(masks)
    if char_inputs is not None:
        assert n == len(char_inputs)

    order = None
    if shuffle:
        order = np.arange(n)
        np.random.shuffle(order)

    for start in range(0, n, batch_size):
        if shuffle:
            sel = order[start:start + batch_size]
        else:
            sel = slice(start, start + batch_size)
        batch_masks = None if masks is None else masks[sel]
        batch_chars = None if char_inputs is None else char_inputs[sel]
        yield inputs[sel], targets[sel], batch_masks, batch_chars
def create_updates(loss, params, update_algo, learning_rate, momentum=None):
    """
    create updates for training

    :param loss: loss expression to differentiate
    :param params: parameters for update
    :param update_algo: one of 'sgd', 'momentum', 'nesterov', 'adadelta', 'adam'
    :param learning_rate: learning rate (ignored by 'adadelta')
    :param momentum: momentum coefficient (used by 'momentum' and 'nesterov')
    :return: updates dictionary for ``theano.function``
    :raises ValueError: if ``update_algo`` is not a recognized algorithm
    """
    # Kept as an if/elif chain (rather than a dispatch dict) so an unknown
    # algorithm raises ValueError without touching lasagne at all.
    if update_algo == 'sgd':
        return lasagne.updates.sgd(loss, params=params, learning_rate=learning_rate)
    elif update_algo == 'momentum':
        return lasagne.updates.momentum(loss, params=params, learning_rate=learning_rate, momentum=momentum)
    elif update_algo == 'nesterov':
        return lasagne.updates.nesterov_momentum(loss, params=params, learning_rate=learning_rate, momentum=momentum)
    elif update_algo == 'adadelta':
        return lasagne.updates.adadelta(loss, params=params)
    elif update_algo == 'adam':
        return lasagne.updates.adam(loss, params=params, learning_rate=learning_rate)
    else:
        # Bug fix: corrected 'unkown' typo in the error message.
        raise ValueError('unknown update algorithm: %s' % update_algo)
def get_all_params_by_name(layer, name=None, **tags):
    """Collect network parameters, optionally filtered by name.

    :param layer: output layer whose graph is traversed.
    :param name: ``None`` (return everything), a single name, or a list of
        names to keep.
    :param tags: tag filters forwarded to ``lasagne.layers.get_all_params``.
    :return: list of matching shared variables.
    """
    all_params = lasagne.layers.get_all_params(layer, **tags)
    if name is None:
        return all_params
    wanted = set(name) if isinstance(name, list) else {name}
    return [p for p in all_params if p.name in wanted]
def output_predictions(predictions, targets, masks, filename, label_alphabet, is_flattened=True):
    """Append predicted vs. gold labels to *filename* in conlleval format.

    One ``_ <gold> <predicted>`` line per unmasked token, with a blank line
    after each sentence.  Label indices are shifted by +1 before lookup in
    ``label_alphabet`` (labels were stored 0-based, the alphabet is 1-based).

    :param predictions: predicted label ids, flat ``[batch * max_length]`` if
        ``is_flattened`` else ``[batch, max_length]``.
    :param targets: gold label ids, shape ``[batch, max_length]``.
    :param masks: 0/1 mask, shape ``[batch, max_length]``.
    :param filename: output path (opened in append mode).
    :param label_alphabet: provides ``get_instance(index)`` -> label string.
    """
    batch_size, max_length = targets.shape
    with open(filename, 'a') as out:
        for i in range(batch_size):
            for j in range(max_length):
                if masks[i, j] > 0.:
                    if is_flattened:
                        pred = predictions[i * max_length + j] + 1
                    else:
                        pred = predictions[i, j] + 1
                    gold = label_alphabet.get_instance(targets[i, j] + 1)
                    out.write('_ %s %s\n' % (gold, label_alphabet.get_instance(pred)))
            out.write('\n')
| 6,567
| 38.329341
| 117
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/utils/data_processor.py
|
__author__ = 'max'
import numpy as np
import theano
from alphabet import Alphabet
from lasagne_nlp.utils import utils as utils
root_symbol = "##ROOT##"
root_label = "<ROOT>"
word_end = "##WE##"
MAX_LENGTH = 130
MAX_CHAR_LENGTH = 45
logger = utils.get_logger("LoadData")
def read_conll_sequence_labeling(path, word_alphabet, label_alphabet, word_column=1, label_column=4):
    """
    read data from file in conll format

    :param path: file path
    :param word_column: the column index of word (start from 0)
    :param label_column: the column of label (start from 0)
    :param word_alphabet: alphabet of words
    :param label_alphabet: alphabet of labels
    :return: sentences of words and labels, sentences of indexes of words and labels.
    """
    word_sentences = []
    label_sentences = []
    word_index_sentences = []
    label_index_sentences = []

    # buffers for the sentence currently being read
    words = []
    labels = []
    word_ids = []
    label_ids = []
    num_tokens = 0
    with open(path) as file:
        for line in file:
            # NOTE(review): the decoded result is discarded; under Python 2
            # this only validates that the line is well-formed UTF-8.
            line.decode('utf-8')
            if line.strip() == "":
                # a blank line ends a sentence; keep it only when its length
                # is within (0, MAX_LENGTH], otherwise log and drop it
                if 0 < len(words) <= MAX_LENGTH:
                    word_sentences.append(words[:])
                    label_sentences.append(labels[:])
                    word_index_sentences.append(word_ids[:])
                    label_index_sentences.append(label_ids[:])
                    num_tokens += len(words)
                else:
                    if len(words) != 0:
                        logger.info("ignore sentence with length %d" % (len(words)))
                words = []
                labels = []
                word_ids = []
                label_ids = []
            else:
                # token line: pick out the word/label columns and register
                # them in their alphabets (growing the alphabets if open)
                tokens = line.strip().split()
                word = tokens[word_column]
                label = tokens[label_column]
                words.append(word)
                labels.append(label)
                word_id = word_alphabet.get_index(word)
                label_id = label_alphabet.get_index(label)
                word_ids.append(word_id)
                label_ids.append(label_id)

    # flush the final sentence in case the file does not end with a blank line
    if 0 < len(words) <= MAX_LENGTH:
        word_sentences.append(words[:])
        label_sentences.append(labels[:])
        word_index_sentences.append(word_ids[:])
        label_index_sentences.append(label_ids[:])
        num_tokens += len(words)
    else:
        if len(words) != 0:
            logger.info("ignore sentence with length %d" % (len(words)))
    logger.info("#sentences: %d, #tokens: %d" % (len(word_sentences), num_tokens))
    return word_sentences, label_sentences, word_index_sentences, label_index_sentences
def read_conll_parsing(path, word_alphabet, pos_alphabet, type_alphabet, word_column=1, pos_column=4, head_column=6,
                       type_column=7):
    """
    read data from conll format for parsing

    Every sentence buffer is seeded with an artificial ROOT token at
    position 0 (head index -1), so only sentences with more than one
    entry (root + at least one real word) are kept.

    :param path: file path
    :param word_alphabet:
    :param pos_alphabet:
    :param type_alphabet:
    :param word_column: the column index of word (start from 0)
    :param pos_column: the column index of pos (start from 0)
    :param head_column: the column index of head (start from 0)
    :param type_column: the column index of types (start from 0)
    :return: sentences of words, pos tags, heads and dependency types, plus
        the corresponding index sentences for words, pos tags and types.
    """
    word_sentences = []
    pos_sentences = []
    head_sentences = []
    type_sentence = []

    word_index_sentences = []
    pos_index_sentences = []
    type_index_sentences = []

    # buffers for the sentence currently being read
    words = []
    poss = []
    heads = []
    types = []

    word_ids = []
    pos_ids = []
    type_ids = []

    # initialization: register the ROOT symbols in the alphabets and seed
    # the first sentence buffer with the ROOT token
    root_word_id = word_alphabet.get_index(root_symbol)
    root_pos_id = pos_alphabet.get_index(root_symbol)
    root_type_id = type_alphabet.get_index(root_label)
    logger.info('Root symbol index: word=%d, pos=%d, type=%d' % (root_word_id, root_pos_id, root_type_id))

    words.append(root_symbol)
    poss.append(root_symbol)
    heads.append(-1)
    types.append((root_label))

    word_ids.append(root_word_id)
    pos_ids.append(root_pos_id)
    type_ids.append(root_type_id)

    num_tokens = 0
    with open(path) as file:
        for line in file:
            # NOTE(review): decoded result is discarded; under Python 2 this
            # only validates that the line is well-formed UTF-8.
            line.decode('utf-8')
            if line.strip() == "":
                # blank line ends a sentence; length check is > 1 because the
                # buffer always contains the artificial ROOT token
                if 1 < len(words) <= MAX_LENGTH:
                    word_sentences.append(words[:])
                    pos_sentences.append(poss[:])
                    head_sentences.append((heads[:]))
                    type_sentence.append(types[:])

                    word_index_sentences.append(word_ids[:])
                    pos_index_sentences.append(pos_ids[:])
                    type_index_sentences.append(type_ids[:])

                    num_tokens += len(words)
                else:
                    if len(words) != 0:
                        logger.info("ignore sentence with length %d" % (len(words)))

                # reset the buffers and re-seed them with the ROOT token
                words = []
                poss = []
                heads = []
                types = []

                word_ids = []
                pos_ids = []
                type_ids = []

                words.append(root_symbol)
                poss.append(root_symbol)
                heads.append(-1)
                types.append((root_label))

                word_ids.append(root_word_id)
                pos_ids.append(root_pos_id)
                type_ids.append(root_type_id)
            else:
                # token line: extract the configured columns and register
                # word/pos/type in their alphabets
                tokens = line.strip().split()
                word = tokens[word_column]
                pos = tokens[pos_column]
                head = int(tokens[head_column])
                type = tokens[type_column]

                words.append(word)
                poss.append(pos)
                heads.append(head)
                types.append(type)

                word_id = word_alphabet.get_index(word)
                pos_id = pos_alphabet.get_index(pos)
                type_id = type_alphabet.get_index(type)

                word_ids.append(word_id)
                pos_ids.append(pos_id)
                type_ids.append(type_id)

    # flush the final sentence in case the file does not end with a blank line
    if 1 < len(words) <= MAX_LENGTH:
        word_sentences.append(words[:])
        pos_sentences.append(poss[:])
        head_sentences.append((heads[:]))
        type_sentence.append(types[:])

        word_index_sentences.append(word_ids[:])
        pos_index_sentences.append(pos_ids[:])
        type_index_sentences.append(type_ids[:])

        num_tokens += len(words)
    else:
        if len(words) != 0:
            logger.info("ignore sentence with length %d" % (len(words)))
    logger.info("#sentences: %d, #tokens: %d" % (len(word_sentences), num_tokens))
    return word_sentences, pos_sentences, head_sentences, type_sentence, word_index_sentences, pos_index_sentences, type_index_sentences
def generate_character_data(sentences_train, sentences_dev, sentences_test, max_sent_length, char_embedd_dim=30):
    """
    generate data for characters

    Builds a character alphabet from all three splits, converts every word
    into a padded sequence of character ids, and creates a random character
    embedding table.

    :param sentences_train:
    :param sentences_dev:
    :param sentences_test:
    :param max_sent_length: maximum sentence length used to size the tensors
    :param char_embedd_dim: dimension of the character embeddings
    :return: C_train, C_dev, C_test, char_embedd_table
    """
    def get_character_indexes(sentences):
        # map every word to its list of character ids, tracking the longest
        # word seen.  NOTE(review): max_length counts the full word length
        # even though the ids are truncated at MAX_CHAR_LENGTH; the min()
        # below caps it again, so the final value is consistent.
        index_sentences = []
        max_length = 0
        for words in sentences:
            index_words = []
            for word in words:
                index_chars = []
                if len(word) > max_length:
                    max_length = len(word)

                for char in word[:MAX_CHAR_LENGTH]:
                    char_id = char_alphabet.get_index(char)
                    index_chars.append(char_id)

                index_words.append(index_chars)
            index_sentences.append(index_words)
        return index_sentences, max_length

    def construct_tensor_char(index_sentences):
        # C has shape [num_sentences, max_sent_length, max_char_length];
        # positions past the end of a word are filled with the word-end id,
        # positions past the end of a sentence are zeroed.
        C = np.empty([len(index_sentences), max_sent_length, max_char_length], dtype=np.int32)
        word_end_id = char_alphabet.get_index(word_end)

        for i in range(len(index_sentences)):
            words = index_sentences[i]
            sent_length = len(words)
            for j in range(sent_length):
                chars = words[j]
                char_length = len(chars)
                for k in range(char_length):
                    cid = chars[k]
                    C[i, j, k] = cid
                # fill index of word end after the end of word
                C[i, j, char_length:] = word_end_id
            # Zero out C after the end of the sentence
            C[i, sent_length:, :] = 0
        return C

    def build_char_embedd_table():
        # uniform initialization with the usual sqrt(3/dim) scale
        scale = np.sqrt(3.0 / char_embedd_dim)
        char_embedd_table = np.random.uniform(-scale, scale, [char_alphabet.size(), char_embedd_dim]).astype(
            theano.config.floatX)
        return char_embedd_table

    char_alphabet = Alphabet('character')
    # reserve an id for the word-end padding symbol first
    char_alphabet.get_index(word_end)

    index_sentences_train, max_char_length_train = get_character_indexes(sentences_train)
    index_sentences_dev, max_char_length_dev = get_character_indexes(sentences_dev)
    index_sentences_test, max_char_length_test = get_character_indexes(sentences_test)

    # close character alphabet
    char_alphabet.close()
    logger.info("character alphabet size: %d" % (char_alphabet.size() - 1))

    max_char_length = min(MAX_CHAR_LENGTH, max(max_char_length_train, max_char_length_dev, max_char_length_test))
    logger.info("Maximum character length of training set is %d" % max_char_length_train)
    logger.info("Maximum character length of dev set is %d" % max_char_length_dev)
    logger.info("Maximum character length of test set is %d" % max_char_length_test)
    logger.info("Maximum character length used for training is %d" % max_char_length)

    # fill character tensor
    C_train = construct_tensor_char(index_sentences_train)
    C_dev = construct_tensor_char(index_sentences_dev)
    C_test = construct_tensor_char(index_sentences_test)

    return C_train, C_dev, C_test, build_char_embedd_table()
def get_max_length(word_sentences):
    """Return the length of the longest sentence (0 for an empty list).

    :param word_sentences: iterable of sentences (each a sized sequence).
    :return: maximum sentence length, or 0 when there are no sentences.
    """
    # Idiom: use the builtin max over a generator instead of a manual
    # accumulator loop; the guard preserves the original's 0-for-empty result.
    if not word_sentences:
        return 0
    return max(len(sentence) for sentence in word_sentences)
def build_embedd_table(word_alphabet, embedd_dict, embedd_dim, caseless):
    """Build the initial word-embedding matrix for the alphabet.

    Row ``word_alphabet.default_index`` (the unknown word) and every word
    missing from ``embedd_dict`` get a fresh uniform random vector scaled by
    ``sqrt(3 / dim)``; known words copy their pretrained vector.

    :param word_alphabet: vocabulary providing ``size``, ``default_index``
        and ``iteritems()``.
    :param embedd_dict: mapping word -> (1, embedd_dim) vector.
    :param embedd_dim: embedding dimension.
    :param caseless: lowercase words before the dictionary lookup.
    :return: array of shape (alphabet size, embedd_dim).
    """
    scale = np.sqrt(3.0 / embedd_dim)

    def lookup(word):
        # fall back to a random vector for out-of-dictionary words
        key = word.lower() if caseless else word
        if key in embedd_dict:
            return embedd_dict[key]
        return np.random.uniform(-scale, scale, [1, embedd_dim])

    embedd_table = np.empty([word_alphabet.size(), embedd_dim], dtype=theano.config.floatX)
    embedd_table[word_alphabet.default_index, :] = np.random.uniform(-scale, scale, [1, embedd_dim])
    for word, index in word_alphabet.iteritems():
        embedd_table[index, :] = lookup(word)
    return embedd_table
def load_dataset_sequence_labeling(train_path, dev_path, test_path, word_column=1, label_column=4,
label_name='pos', oov='embedding', fine_tune=False, embedding="word2Vec",
embedding_path=None,
use_character=False):
"""
load data from file
:param train_path: path of training file
:param dev_path: path of dev file
:param test_path: path of test file
:param word_column: the column index of word (start from 0)
:param label_column: the column of label (start from 0)
:param label_name: name of label, such as pos or ner
:param oov: embedding for oov word, choose from ['random', 'embedding']. If "embedding", then add words in dev and
test data to alphabet; if "random", not.
:param fine_tune: if fine tune word embeddings.
:param embedding: embeddings for words, choose from ['word2vec', 'senna'].
:param embedding_path: path of file storing word embeddings.
:param use_character: if use character embeddings.
:return: X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test,
embedd_table (if fine tune), label_alphabet, C_train, C_dev, C_test, char_embedd_table
"""
def construct_tensor_fine_tune(word_index_sentences, label_index_sentences):
X = np.empty([len(word_index_sentences), max_length], dtype=np.int32)
Y = np.empty([len(word_index_sentences), max_length], dtype=np.int32)
mask = np.zeros([len(word_index_sentences), max_length], dtype=theano.config.floatX)
for i in range(len(word_index_sentences)):
word_ids = word_index_sentences[i]
label_ids = label_index_sentences[i]
length = len(word_ids)
for j in range(length):
wid = word_ids[j]
label = label_ids[j]
X[i, j] = wid
Y[i, j] = label - 1
# Zero out X after the end of the sequence
X[i, length:] = 0
# Copy the last label after the end of the sequence
Y[i, length:] = Y[i, length - 1]
# Make the mask for this sample 1 within the range of length
mask[i, :length] = 1
return X, Y, mask
def generate_dataset_fine_tune():
"""
generate data tensor when fine tuning
:return: X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, embedd_table, label_size
"""
embedd_dict, embedd_dim, caseless = utils.load_word_embedding_dict(embedding, embedding_path, word_alphabet,
logger)
logger.info("Dimension of embedding is %d, Caseless: %d" % (embedd_dim, caseless))
# fill data tensor (X.shape = [#data, max_length], Y.shape = [#data, max_length])
X_train, Y_train, mask_train = construct_tensor_fine_tune(word_index_sentences_train,
label_index_sentences_train)
X_dev, Y_dev, mask_dev = construct_tensor_fine_tune(word_index_sentences_dev, label_index_sentences_dev)
X_test, Y_test, mask_test = construct_tensor_fine_tune(word_index_sentences_test, label_index_sentences_test)
C_train, C_dev, C_test, char_embedd_table = generate_character_data(word_sentences_train, word_sentences_dev,
word_sentences_test,
max_length) if use_character else (
None, None, None, None)
return X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
build_embedd_table(word_alphabet, embedd_dict, embedd_dim, caseless), label_alphabet, \
C_train, C_dev, C_test, char_embedd_table
def construct_tensor_not_fine_tune(word_sentences, label_index_sentences, unknown_embedd, embedd_dict,
embedd_dim, caseless):
X = np.empty([len(word_sentences), max_length, embedd_dim], dtype=theano.config.floatX)
Y = np.empty([len(word_sentences), max_length], dtype=np.int32)
mask = np.zeros([len(word_sentences), max_length], dtype=theano.config.floatX)
# bad_dict = dict()
# bad_num = 0
for i in range(len(word_sentences)):
words = word_sentences[i]
label_ids = label_index_sentences[i]
length = len(words)
for j in range(length):
word = words[j].lower() if caseless else words[j]
label = label_ids[j]
embedd = embedd_dict[word] if word in embedd_dict else unknown_embedd
X[i, j, :] = embedd
Y[i, j] = label - 1
# if word not in embedd_dict:
# bad_num += 1
# if word in bad_dict:
# bad_dict[word] += 1
# else:
# bad_dict[word] = 1
# Zero out X after the end of the sequence
X[i, length:] = np.zeros([1, embedd_dim], dtype=theano.config.floatX)
# Copy the last label after the end of the sequence
Y[i, length:] = Y[i, length - 1]
# Make the mask for this sample 1 within the range of length
mask[i, :length] = 1
# for w, c in bad_dict.items():
# if c >= 100:
# print "%s: %d" % (w, c)
# print bad_num
return X, Y, mask
def generate_dataset_not_fine_tune():
    """
    generate data tensor when not fine tuning
    :return: X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, None, label_size
    """
    # load pre-trained embeddings; `caseless` says whether lookups must be lowercased
    embedd_dict, embedd_dim, caseless = utils.load_word_embedding_dict(embedding, embedding_path, word_alphabet,
                                                                       logger)
    logger.info("Dimension of embedding is %d, Caseless: %s" % (embedd_dim, caseless))
    # fill data tensor (X.shape = [#data, max_length, embedding_dim], Y.shape = [#data, max_length])
    # one random vector is shared by every out-of-vocabulary word
    unknown_embedd = np.random.uniform(-0.01, 0.01, [1, embedd_dim])
    X_train, Y_train, mask_train = construct_tensor_not_fine_tune(word_sentences_train,
                                                                  label_index_sentences_train, unknown_embedd,
                                                                  embedd_dict, embedd_dim, caseless)
    X_dev, Y_dev, mask_dev = construct_tensor_not_fine_tune(word_sentences_dev, label_index_sentences_dev,
                                                            unknown_embedd, embedd_dict, embedd_dim, caseless)
    X_test, Y_test, mask_test = construct_tensor_not_fine_tune(word_sentences_test, label_index_sentences_test,
                                                               unknown_embedd, embedd_dict, embedd_dim, caseless)
    # character-level tensors are only built when requested via `use_character`
    C_train, C_dev, C_test, char_embedd_table = generate_character_data(word_sentences_train, word_sentences_dev,
                                                                        word_sentences_test,
                                                                        max_length) if use_character else (
        None, None, None, None)
    # the embedding-table slot is None: the network consumes raw embedding vectors
    return X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
        None, label_alphabet, C_train, C_dev, C_test, char_embedd_table
word_alphabet = Alphabet('word')
label_alphabet = Alphabet(label_name)
# read training data
logger.info("Reading data from training set...")
word_sentences_train, _, word_index_sentences_train, label_index_sentences_train = read_conll_sequence_labeling(
train_path, word_alphabet, label_alphabet, word_column, label_column)
# if oov is "random" and do not fine tune, close word_alphabet
if oov == "random" and not fine_tune:
logger.info("Close word alphabet.")
word_alphabet.close()
# read dev data
logger.info("Reading data from dev set...")
word_sentences_dev, _, word_index_sentences_dev, label_index_sentences_dev = read_conll_sequence_labeling(
dev_path, word_alphabet, label_alphabet, word_column, label_column)
# read test data
logger.info("Reading data from test set...")
word_sentences_test, _, word_index_sentences_test, label_index_sentences_test = read_conll_sequence_labeling(
test_path, word_alphabet, label_alphabet, word_column, label_column)
# close alphabets
word_alphabet.close()
label_alphabet.close()
logger.info("word alphabet size: %d" % (word_alphabet.size() - 1))
logger.info("label alphabet size: %d" % (label_alphabet.size() - 1))
# get maximum length
max_length_train = get_max_length(word_sentences_train)
max_length_dev = get_max_length(word_sentences_dev)
max_length_test = get_max_length(word_sentences_test)
max_length = min(MAX_LENGTH, max(max_length_train, max_length_dev, max_length_test))
logger.info("Maximum length of training set is %d" % max_length_train)
logger.info("Maximum length of dev set is %d" % max_length_dev)
logger.info("Maximum length of test set is %d" % max_length_test)
logger.info("Maximum length used for training is %d" % max_length)
if fine_tune:
logger.info("Generating data with fine tuning...")
return generate_dataset_fine_tune()
else:
logger.info("Generating data without fine tuning...")
return generate_dataset_not_fine_tune()
def load_dataset_parsing(train_path, dev_path, test_path, word_column=1, pos_column=4, head_column=6, type_column=7,
                         embedding="word2Vec", embedding_path=None):
    """
    load data from file
    :param train_path: path of training file
    :param dev_path: path of dev file
    :param test_path: path of test file
    :param word_column: the column index of word (start from 0)
    :param pos_column: the column index of pos (start from 0)
    :param head_column: the column index of head (start from 0)
    :param type_column: the column index of types (start from 0)
    :param embedding: embeddings for words, choose from ['word2vec', 'senna'].
    :param embedding_path: path of file storing word embeddings.
    :return: X_train, POS_train, Head_train, Type_train, mask_train,
            X_dev, POS_dev, Head_dev, Type_dev, mask_dev,
            X_test, POS_test, Head_test, Type_test, mask_test,
            embedd_table, word_alphabet, pos_alphabet, type_alphabet, C_train, C_dev, C_test, char_embedd_table
    """

    def construct_tensor(word_index_sentences, pos_index_sentences, head_sentences, type_index_sentences):
        # Pack one data split into padded index tensors plus a validity mask.
        # Closes over `max_length` computed below.
        X = np.empty([len(word_index_sentences), max_length], dtype=np.int32)
        POS = np.empty([len(word_index_sentences), max_length], dtype=np.int32)
        Head = np.empty([len(word_index_sentences), max_length], dtype=np.int32)
        Type = np.empty([len(word_index_sentences), max_length], dtype=np.int32)
        mask = np.zeros([len(word_index_sentences), max_length], dtype=theano.config.floatX)
        for i in range(len(word_index_sentences)):
            word_ids = word_index_sentences[i]
            pos_ids = pos_index_sentences[i]
            heads = head_sentences[i]
            type_ids = type_index_sentences[i]
            length = len(word_ids)
            for j in range(length):
                wid = word_ids[j]
                pid = pos_ids[j]
                head = heads[j]
                tid = type_ids[j]
                X[i, j] = wid
                # POS/Type alphabet ids are 1-based; the tensors store them 0-based
                POS[i, j] = pid - 1
                Head[i, j] = head
                Type[i, j] = tid - 1
            # Zero out X after the end of the sequence
            X[i, length:] = 0
            # Copy the last label after the end of the sequence
            POS[i, length:] = POS[i, length - 1]
            Head[i, length:] = Head[i, length - 1]
            Type[i, length:] = Type[i, length - 1]
            # Make the mask for this sample 1 within the range of length
            mask[i, :length] = 1
        return X, POS, Head, Type, mask

    word_alphabet = Alphabet('word')
    pos_alphabet = Alphabet('pos')
    type_alphabet = Alphabet('type')
    # read training data
    logger.info("Reading data from training set...")
    word_sentences_train, pos_sentences_train, head_sentences_train, type_sentence_train, \
    word_index_sentences_train, pos_index_sentences_train, \
    type_index_sentences_train = read_conll_parsing(train_path, word_alphabet, pos_alphabet, type_alphabet, word_column,
                                                    pos_column, head_column, type_column)
    # read dev data
    logger.info("Reading data from dev set...")
    word_sentences_dev, pos_sentences_dev, head_sentences_dev, type_sentence_dev, \
    word_index_sentences_dev, pos_index_sentences_dev, \
    type_index_sentences_dev = read_conll_parsing(dev_path, word_alphabet, pos_alphabet, type_alphabet, word_column,
                                                  pos_column, head_column, type_column)
    # read test data
    logger.info("Reading data from test set...")
    word_sentences_test, pos_sentences_test, head_sentences_test, type_sentence_test, \
    word_index_sentences_test, pos_index_sentences_test, \
    type_index_sentences_test = read_conll_parsing(test_path, word_alphabet, pos_alphabet, type_alphabet, word_column,
                                                   pos_column, head_column, type_column)
    # close alphabets (all three splits have been read, so no more growth)
    word_alphabet.close()
    pos_alphabet.close()
    type_alphabet.close()
    # sizes exclude the reserved index 0
    logger.info("word alphabet size: %d" % (word_alphabet.size() - 1))
    logger.info("pos alphabet size: %d" % (pos_alphabet.size() - 1))
    logger.info("type alphabet size: %d" % (type_alphabet.size() - 1))
    # get maximum length (capped at MAX_LENGTH across all splits)
    max_length_train = get_max_length(word_sentences_train)
    max_length_dev = get_max_length(word_sentences_dev)
    max_length_test = get_max_length(word_sentences_test)
    max_length = min(MAX_LENGTH, max(max_length_train, max_length_dev, max_length_test))
    logger.info("Maximum length of training set is %d" % max_length_train)
    logger.info("Maximum length of dev set is %d" % max_length_dev)
    logger.info("Maximum length of test set is %d" % max_length_test)
    logger.info("Maximum length used for training is %d" % max_length)
    embedd_dict, embedd_dim, caseless = utils.load_word_embedding_dict(embedding, embedding_path, word_alphabet,
                                                                       logger)
    # %d renders the `caseless` bool as 1/0
    logger.info("Dimension of embedding is %d, Caseless: %d" % (embedd_dim, caseless))
    # fill data tensor (X.shape = [#data, max_length], {POS, Head, Type}.shape = [#data, max_length])
    X_train, POS_train, Head_train, Type_train, mask_train = construct_tensor(word_index_sentences_train,
                                                                              pos_index_sentences_train,
                                                                              head_sentences_train,
                                                                              type_index_sentences_train)
    X_dev, POS_dev, Head_dev, Type_dev, mask_dev = construct_tensor(word_index_sentences_dev,
                                                                    pos_index_sentences_dev,
                                                                    head_sentences_dev,
                                                                    type_index_sentences_dev)
    X_test, POS_test, Head_test, Type_test, mask_test = construct_tensor(word_index_sentences_test,
                                                                         pos_index_sentences_test,
                                                                         head_sentences_test,
                                                                         type_index_sentences_test)
    embedd_table = build_embedd_table(word_alphabet, embedd_dict, embedd_dim, caseless)
    # character-level tensors are always built in the parsing pipeline
    C_train, C_dev, C_test, char_embedd_table = generate_character_data(word_sentences_train, word_sentences_dev,
                                                                        word_sentences_test, max_length)
    return X_train, POS_train, Head_train, Type_train, mask_train, \
        X_dev, POS_dev, Head_dev, Type_dev, mask_dev, \
        X_test, POS_test, Head_test, Type_test, mask_test, \
        embedd_table, word_alphabet, pos_alphabet, type_alphabet, \
        C_train, C_dev, C_test, char_embedd_table
| 27,450
| 43.0626
| 136
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/utils/objectives.py
|
"""
objectives for some loss functions
"""
__author__ = 'max'
import numpy as np
import theano
import theano.tensor as T
import lasagne_nlp.theano.nlinalg as nlinalg
def theano_logsumexp(x, axis=None):
    """
    Compute log(sum(exp(x), axis=axis) in a numerically stable
    fashion.
    Parameters
    ----------
    x : tensor_like
        A Theano tensor (any dimension will do).
    axis : int or symbolic integer scalar, or None
        Axis over which to perform the summation. `None`, the
        default, performs over all axes.
    Returns
    -------
    result : ndarray or scalar
        The result of the log(sum(exp(...))) operation.
    """
    # xmax keeps dims so it broadcasts against x in the subtraction below;
    # xmax_ (dims reduced) matches the shape of the summed result.
    # NOTE(review): the max is computed twice; xmax_ could be derived from xmax.
    xmax = x.max(axis=axis, keepdims=True)
    xmax_ = x.max(axis=axis)
    return xmax_ + T.log(T.exp(x - xmax).sum(axis=axis))
def parser_loss(energies, heads, types, masks):
    """
    compute minus log likelihood of parser as parser loss.
    :param energies: Theano 4D tensor
        energies of each edge. the shape is [batch_size, n_steps, n_steps, num_labels],
        where the dummy root is at index 0.
    :param heads: Theano 2D tensor
        heads in the shape [batch_size, n_steps].
    :param types: Theano 2D tensor
        types in the shape [batch_size, n_steps].
    :param masks: Theano 2D tensor
        masks in the shape [batch_size, n_steps].
    :return: Theano 1D tensor
        an expression for minus log likelihood loss.
    """
    input_shape = energies.shape
    batch_size = input_shape[0]
    length = input_shape[1]
    # get the exp of energies, and add along the label axis.
    # the shape is [batch_size, n, n].
    E = T.exp(energies).sum(axis=3)
    # zero out the elements out the length of each sentence
    # (mask applied along both the head and the modifier axis).
    if masks is not None:
        masks_shuffled = masks.dimshuffle(0, 1, 'x')
        E = E * masks_shuffled
        masks_shuffled = masks.dimshuffle(0, 'x', 1)
        E = E * masks_shuffled
    # compute the D tensor (per-node column sums of E, broadcast to [n, n]).
    # the shape is [batch_size, n, n]
    D = E.sum(axis=1)
    D = T.zeros_like(E) + D.dimshuffle(0, 1, 'x')
    # zeros out all elements except diagonal.
    D = D * T.eye(length, length, 0).dimshuffle('x', 0, 1)
    # compute lengths (number of unmasked positions per sentence)
    lengths = T.cast(masks, dtype='int32').sum(axis=1)
    # compute laplacian matrix
    L = D - E
    # compute partition Z(x): log|det| of each minor L[1:len, 1:len]
    # (root row/column removed), evaluated per batch element via scan.
    partitions, _ = theano.scan(fn=lambda laps, length: nlinalg.logabsdet(laps[1:length, 1:length]), outputs_info=None,
                                sequences=[L, lengths])
    # compute targets energy
    # first create indice matrix
    indices = T.zeros_like(heads) + T.arange(length).dimshuffle('x', 0)
    # compute loss matrix shape = [n_steps, batch_size]
    target_energy = energies[T.arange(batch_size), heads.T, indices.T, types.T]
    # shuffle loss to [batch_size, n_steps]
    target_energy = target_energy.dimshuffle(1, 0)
    # remove the first element (the dummy root position) [batch, n_steps - 1]
    target_energy = target_energy[:, 1:]
    # sum over n_step shape = [batch_size]
    target_energy = target_energy.sum(axis=1)
    # negative log-likelihood per sentence: log Z(x) - score(gold tree)
    return partitions - target_energy#, E, D, L, partitions, target_energy
def crf_loss(energies, targets, masks):
    """
    compute minus log likelihood of crf as crf loss.
    :param energies: Theano 4D tensor
        energies of each step. the shape is [batch_size, n_time_steps, num_labels, num_labels],
        where the pad label index is at last.
    :param targets: Theano 2D tensor
        targets in the shape [batch_size, n_time_steps]
    :param masks: Theano 2D tensor
        masks in the shape [batch_size, n_time_steps]
    :return: Theano 1D tensor
        an expression for minus log likelihood loss.
    """
    assert energies.ndim == 4
    assert targets.ndim == 2
    assert masks.ndim == 2

    def inner_function(energies_one_step, targets_one_step, mask_one_step, prior_partition, prev_label, tg_energy):
        """
        One forward-algorithm step: update the per-label partition and
        accumulate the gold-path transition energy.
        :param energies_one_step: [batch_size, t, t]
        :param targets_one_step: [batch_size]
        :param prior_partition: [batch_size, t]
        :param prev_label: [batch_size]
        :param tg_energy: [batch_size]
        :return:
        """
        partition_shuffled = prior_partition.dimshuffle(0, 1, 'x')
        # masked-out (padding) positions carry the previous partition forward unchanged
        partition_t = T.switch(mask_one_step.dimshuffle(0, 'x'),
                               theano_logsumexp(energies_one_step + partition_shuffled, axis=1),
                               prior_partition)
        return [partition_t, targets_one_step,
                tg_energy + energies_one_step[T.arange(energies_one_step.shape[0]), prev_label, targets_one_step]]

    # Input should be provided as (n_batch, n_time_steps, num_labels, num_labels)
    # but scan requires the iterable dimension to be first
    # So, we need to dimshuffle to (n_time_steps, n_batch, num_labels, num_labels)
    energies_shuffled = energies.dimshuffle(1, 0, 2, 3)
    targets_shuffled = targets.dimshuffle(1, 0)
    masks_shuffled = masks.dimshuffle(1, 0)
    # initials should be energies_shuffles[0, :, -1, :]
    # init_label = -1 selects the last row, i.e. the pad label acts as the "start" state
    init_label = T.cast(T.fill(energies[:, 0, 0, 0], -1), 'int32')
    energy_time0 = energies_shuffled[0]
    target_time0 = targets_shuffled[0]
    initials = [energies_shuffled[0, :, -1, :], target_time0,
                energy_time0[T.arange(energy_time0.shape[0]), init_label, target_time0]]
    [partitions, _, target_energies], _ = theano.scan(fn=inner_function, outputs_info=initials,
                                                      sequences=[energies_shuffled[1:], targets_shuffled[1:],
                                                                 masks_shuffled[1:]])
    partition = partitions[-1]
    target_energy = target_energies[-1]
    # NLL per sequence: log Z - score(gold path)
    loss = theano_logsumexp(partition, axis=1) - target_energy
    return loss
def crf_accuracy(energies, targets):
    """
    decode crf (Viterbi) and compute accuracy
    :param energies: Theano 4D tensor
        energies of each step. the shape is [batch_size, n_time_steps, num_labels, num_labels],
        where the pad label index is at last.
    :param targets: Theano 2D tensor
        targets in the shape [batch_size, n_time_steps]
    :return: (prediction, correct)
        prediction: Theano 2D tensor [batch_size, n_time_steps], the decoded labels;
        correct: Theano 2D tensor, elementwise prediction == targets.
    """
    assert energies.ndim == 4
    assert targets.ndim == 2

    def inner_function(energies_one_step, prior_pi, prior_pointer):
        """
        One Viterbi forward step: best score per label plus back-pointer.
        :param energies_one_step: [batch_size, t, t]
        :param prior_pi: [batch_size, t]
        :param prior_pointer: [batch_size, t]
        :return:
        """
        prior_pi_shuffled = prior_pi.dimshuffle(0, 1, 'x')
        pi_t = T.max(prior_pi_shuffled + energies_one_step, axis=1)
        pointer_t = T.argmax(prior_pi_shuffled + energies_one_step, axis=1)
        return [pi_t, pointer_t]

    def back_pointer(pointer, pointer_tp1):
        """
        Follow one back-pointer step during backtracking.
        :param pointer: [batch, t]
        :param pointer_tp1: [batch,]
        :return:
        """
        return pointer[T.arange(pointer.shape[0]), pointer_tp1]

    # Input should be provided as (n_batch, n_time_steps, num_labels, num_labels)
    # but scan requires the iterable dimension to be first
    # So, we need to dimshuffle to (n_time_steps, n_batch, num_labels, num_labels)
    energies_shuffled = energies.dimshuffle(1, 0, 2, 3)
    # pi at time 0 is the last row at time 0. but we need to remove the last column which is the pad symbol.
    pi_time0 = energies_shuffled[0, :, -1, :-1]
    # the last row and column is the tag for pad symbol. reduce these two dimensions by 1 to remove that.
    # now the shape of energies_shuffled is [n_time_steps, b_batch, t, t] where t = num_labels - 1.
    energies_shuffled = energies_shuffled[:, :, :-1, :-1]
    initials = [pi_time0, T.cast(T.fill(pi_time0, -1), 'int64')]
    [pis, pointers], _ = theano.scan(fn=inner_function, outputs_info=initials, sequences=[energies_shuffled[1:]])
    pi_n = pis[-1]
    pointer_n = T.argmax(pi_n, axis=1)
    # walk the back-pointers from the best final label to recover the whole path
    back_pointers, _ = theano.scan(fn=back_pointer, outputs_info=pointer_n, sequences=[pointers], go_backwards=True)
    # prediction shape [batch_size, length]
    # backtracking produced the path in reverse time order; flip it back
    prediction_revered = T.concatenate([pointer_n.dimshuffle(0, 'x'), back_pointers.dimshuffle(1, 0)], axis=1)
    prediction = prediction_revered[:, T.arange(prediction_revered.shape[1] - 1, -1, -1)]
    return prediction, T.eq(prediction, targets)
| 8,314
| 37.85514
| 119
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/utils/alphabet.py
|
__author__ = 'max'
"""
Alphabet maps objects to integer ids. It provides two way mapping from the index to the objects.
"""
import json
import os
from lasagne_nlp.utils import utils as utils
class Alphabet:
    """Two-way mapping between instances (e.g. words or labels) and integer ids.

    Index 0 is reserved as the default/"unknown" id; real instances are
    numbered from 1, so ``instances[i - 1]`` is the instance with id ``i``.
    """

    def __init__(self, name, keep_growing=True):
        # `name` doubles as the default file name for save()/load()
        self.__name = name
        self.instance2index = {}
        self.instances = []
        # while True, unseen instances are added on lookup; after close(),
        # unseen instances map to default_index instead
        self.keep_growing = keep_growing
        # Index 0 is occupied by default, all else following.
        self.default_index = 0
        self.next_index = 1
        self.logger = utils.get_logger('Alphabet')

    def add(self, instance):
        """Register a new instance; no-op if it is already known."""
        if instance not in self.instance2index:
            self.instances.append(instance)
            self.instance2index[instance] = self.next_index
            self.next_index += 1

    def get_index(self, instance):
        """Return the id of `instance`, adding it when growing is allowed;
        otherwise return the default (unknown) index."""
        try:
            return self.instance2index[instance]
        except KeyError:
            if self.keep_growing:
                index = self.next_index
                self.add(instance)
                return index
            else:
                return self.default_index

    def get_instance(self, index):
        """Return the instance with the given id, or None for the reserved id 0."""
        if index == 0:
            # First index is occupied by the wildcard element.
            return None
        try:
            return self.instances[index - 1]
        except IndexError:
            self.logger.warn('unknown instance, return the first label.')
            return self.instances[0]

    def size(self):
        """Number of ids in use, including the reserved id 0."""
        return len(self.instances) + 1

    def iteritems(self):
        # Python 2 style iterator over (instance, index) pairs.
        return self.instance2index.iteritems()

    def enumerate_items(self, start=1):
        """Yield (id, instance) pairs for ids >= `start` (must be in [1, size))."""
        if start < 1 or start >= self.size():
            raise IndexError("Enumerate is allowed between [1 : size of the alphabet)")
        return zip(range(start, len(self.instances) + 1), self.instances[start - 1:])

    def close(self):
        """Freeze the alphabet: unseen instances now map to default_index."""
        self.keep_growing = False

    def open(self):
        """Allow the alphabet to grow again."""
        self.keep_growing = True

    def get_content(self):
        """Return a JSON-serializable snapshot of both mappings."""
        return {'instance2index': self.instance2index, 'instances': self.instances}

    def from_json(self, data):
        """Restore both mappings from a snapshot produced by get_content().

        NOTE(review): next_index is not restored here; growing an alphabet
        after load() would hand out duplicate ids -- keep loaded alphabets closed.
        """
        self.instances = data["instances"]
        self.instance2index = data["instance2index"]

    def save(self, output_directory, name=None):
        """
        Save both alphabet records to the given directory.
        :param output_directory: Directory to save model and weights.
        :param name: The alphabet saving name, optional.
        :return:
        """
        saving_name = name if name else self.__name
        try:
            json.dump(self.get_content(), open(os.path.join(output_directory, saving_name + ".json"), 'w'))
        except Exception as e:
            # BUG FIX: the original '"Alphabet is not saved: " % repr(e)' had no
            # %s placeholder, so the handler itself raised TypeError and masked
            # the real failure.
            self.logger.warn("Alphabet is not saved: %s" % repr(e))

    def load(self, input_directory, name=None):
        """
        Load model architecture and weights from the give directory. This allow we use old models even the structure
        changes.
        :param input_directory: Directory to save model and weights
        :return:
        """
        loading_name = name if name else self.__name
        self.from_json(json.load(open(os.path.join(input_directory, loading_name + ".json"))))
| 3,173
| 31.060606
| 116
|
py
|
LasagneNLP
|
LasagneNLP-master/lasagne_nlp/utils/__init__.py
|
__author__ = 'max'
| 19
| 9
| 18
|
py
|
SSC
|
SSC-master/main.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
python script to train the SSC model
---
Jie Li
jieli_cn@163.com
Nanjing University of Science and Technology
Aug 25, 2019
"""
from utils.seed import seed_torch
import os
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.autograd import Variable
import datetime
from dataloaders import make_data_loader
import sscMetrics
from models import make_model
import config
# ---- Command-line interface: dataset/model selection and training hyper-parameters
parser = argparse.ArgumentParser(description='PyTorch SSC Training')
parser.add_argument('--dataset', type=str, default='nyu', choices=['nyu', 'nyucad', 'debug'],
                    help='dataset name (default: nyu)')
parser.add_argument('--model', type=str, default='ddrnet', choices=['ddrnet', 'aicnet', 'grfnet', 'palnet'],
                    help='model name (default: palnet)')
# parser.add_argument('--data_augment', default=False, type=bool, help='data augment for training')
parser.add_argument('--epochs', default=50, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--lr', default=0.01, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--lr_adj_n', default=100, type=int, metavar='LR', help='every n epochs adjust learning rate once')
parser.add_argument('--lr_adj_rate', default=0.1, type=float, metavar='LR', help='scale while adjusting learning rate')
parser.add_argument('--batch_size', default=4, type=int, metavar='N', help='mini-batch size (default: 4)')
parser.add_argument('--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--resume', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--checkpoint', default='./', metavar='DIR', help='path to checkpoint')
# parser.add_argument('--logdir', default='./logs_debug', metavar='DIR', help='path to logs')
parser.add_argument('--model_name', default='SSC_debug', type=str, help='name of model to save check points')
# parser.add_argument('--w', default=0.05, type=float, help='weight')
# NOTE(review): `global` at module level is a no-op; `args` is module-global anyway.
global args
args = parser.parse_args()
# fix RNG seeds for reproducibility before any data/model code runs
seed_torch(2019)
def main():
    """Entry point: report CUDA availability, run training, and print wall time."""
    if not torch.cuda.is_available():
        print("Sorry, You DO NOT have a CUDA device!")
    else:
        print("Great, You have {} CUDA device!".format(torch.cuda.device_count()))
    started = datetime.datetime.now()
    train()
    elapsed = datetime.datetime.now() - started
    print('Training finished in: {}'.format(elapsed))
def train():
    """Build data loaders, model, optimizer and loss, then run the training
    loop with per-epoch validation and best-model checkpointing.

    Relies on the module-level `args` namespace and `config.class_weights`.
    """
    # ---- Data loader
    train_loader, val_loader = make_data_loader(args)
    # ---- create model ---------- ---------- ---------- ---------- ----------#
    net = make_model(args.model, num_classes=12).cuda()
    #net = torch.nn.DataParallel(net)  # Multi-GPU
    # ---- optionally resume from a checkpoint --------- ---------- ----------#
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            cp_states = torch.load(args.resume)
            net.load_state_dict(cp_states['state_dict'], strict=True)
        else:
            raise Exception("=> NO checkpoint found at '{}'".format(args.resume))
    # -------- ---------- --- Set checkpoint --------- ---------- ----------#
    # timestamp = datetime.datetime.now().strftime("%Y%m%d-%H.%M.%S")
    # model_info = 'epoch{}_lr{}'.format(args.epochs, args.lr)
    cp_filename = args.checkpoint + 'cp_{}.pth.tar'.format(args.model_name)
    cp_best_filename = args.checkpoint + 'cpBest_{}.pth.tar'.format(args.model_name)
    # ---- Define loss function (criterion) and optimizer ---------- ----------#
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=0.0005, momentum=0.9)
    # label 255 marks voxels excluded from the loss
    loss_func = torch.nn.CrossEntropyLoss(weight=config.class_weights, ignore_index=255).cuda()
    # ---- Print Settings for training -------- ---------- ---------- ----------#
    # NOTE(review): five arguments are passed but the format string has only four
    # placeholders; str.format silently ignores the extra `cp_filename`.
    print('Training epochs:{} \nInitial Learning rate:{} \nBatch size:{} \nNumber of workers:{}'.format(
        args.epochs,
        args.lr,
        args.batch_size,
        args.workers,
        cp_filename))
    print("Checkpoint filename:{}".format(cp_filename))
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_adj_n, gamma=args.lr_adj_rate, last_epoch=-1)
    np.set_printoptions(precision=1)
    # ---- Train
    step_count_all = 0
    best_miou = 0
    print("Start training")
    for epoch in range(0, args.epochs):
        # print("epoch {}".format(epoch))
        net.train()  # switch to train mode
        # adjust_learning_rate(optimizer, args.lr, epoch, n=args.lr_adj_n, rate=args.lr_adj_rate)  # n=10, rate=0.9
        decs_str = 'Training epoch {}/{}'.format(epoch + 1, args.epochs)
        log_loss_1epoch = 0.0
        step_count = 0
        torch.cuda.empty_cache()
        for step, (rgb, depth, tsdf, target, position, _) in tqdm(enumerate(train_loader), desc=decs_str, unit='step'):
            # target should be a LongTensor. (bs, 60L, 36L, 60L)
            y_true = target.long().contiguous()
            y_true = Variable(y_true.view(-1)).cuda()  # bs * D * H * W
            # ---- (bs, C, D, H, W), channel first for Conv3d in pyTorch
            # FloatTensor to Variable. (bs, channels, 240L, 144L, 240L)
            x_depth = Variable(depth.float()).cuda()
            position = position.long().cuda()
            # PALNet consumes a TSDF volume; the other models consume RGB
            if args.model == 'palnet':
                x_tsdf = Variable(tsdf.float()).cuda()
                y_pred = net(x_depth=x_depth, x_tsdf=x_tsdf, p=position)
            else:
                x_rgb = Variable(rgb.float()).cuda()
                y_pred = net(x_depth=x_depth, x_rgb=x_rgb, p=position)
            y_pred = y_pred.permute(0, 2, 3, 4, 1).contiguous()  # (BS, C, D, H, W) --> (BS, D, H, W, C)
            y_pred = y_pred.view(-1, 12)  # C = 12
            optimizer.zero_grad()
            loss = loss_func(y_pred, y_true)
            loss.backward()
            optimizer.step()
            # NOTE(review): scheduler.step() runs once per BATCH, so the LR decays
            # every `lr_adj_n` iterations rather than epochs -- confirm intent.
            scheduler.step()
        # ---- Evaluate on validation set
        v_prec, v_recall, v_iou, v_acc, v_ssc_iou, v_mean_iou = validate_on_dataset_stsdf(net, val_loader)
        print('Validate with TSDF:epoch {}, p {:.1f}, r {:.1f}, IoU {:.1f}'.format(epoch + 1, v_prec*100.0, v_recall*100.0, v_iou*100.0))
        print('pixel-acc {:.4f}, mean IoU {:.1f}, SSC IoU:{}'.format(v_acc*100.0, v_mean_iou*100.0, v_ssc_iou*100.0))
        # ---- Save Checkpoint (always the latest; additionally the best by mean IoU)
        is_best = v_mean_iou > best_miou
        best_miou = max(v_mean_iou, best_miou)
        state = {'state_dict': net.state_dict()}
        torch.save(state, cp_filename)
        if is_best:
            print('Yeah! Got better mIoU {}% in epoch {}. State saved'.format(100.0*v_mean_iou, epoch + 1))
            torch.save(state, cp_best_filename)  # Save Checkpoint
# --------------------------------------------------------------------------------------------------------------
def validate_on_dataset_stsdf(model, date_loader, save_ply=False):
    """
    Evaluate on validation set.
    model: network with parameters loaded
    date_loader: TEST mode
    save_ply: unused here; kept for interface compatibility
    Returns batch-averaged precision, recall and completion IoU, pixel
    accuracy, per-class SSC IoU array and mean SSC IoU.
    """
    model.eval()  # switch to evaluate mode.
    val_acc, val_p, val_r, val_iou = 0.0, 0.0, 0.0, 0.0
    _C = 12  # number of semantic classes
    val_cnt_class = np.zeros(_C, dtype=np.int32)  # count for each class
    val_iou_ssc = np.zeros(_C, dtype=np.float32)  # sum of iou for each class
    count = 0
    with torch.no_grad():
        # ---- STSDF depth, input, target, position, _
        for step, (rgb, depth, volume, y_true, nonempty, position, filename) in tqdm(enumerate(date_loader), desc='Validating', unit='frame'):
            var_x_depth = Variable(depth.float()).cuda()
            position = position.long().cuda()
            # same model-specific input selection as in train()
            if args.model == 'palnet':
                var_x_volume = Variable(volume.float()).cuda()
                y_pred = model(x_depth=var_x_depth, x_tsdf=var_x_volume, p=position)
            else:
                var_x_rgb = Variable(rgb.float()).cuda()
                y_pred = model(x_depth=var_x_depth, x_rgb=var_x_rgb, p=position)  # y_pred.size(): (bs, C, W, H, D)
            y_pred = y_pred.cpu().data.numpy()  # CUDA to CPU, Variable to numpy
            y_true = y_true.numpy()  # torch tensor to numpy
            nonempty = nonempty.numpy()
            p, r, iou, acc, iou_sum, cnt_class = validate_on_batch(y_pred, y_true, nonempty)
            count += 1
            val_acc += acc
            val_p += p
            val_r += r
            val_iou += iou
            # accumulate per-class sums/counts; averaged via get_iou below
            val_iou_ssc = np.add(val_iou_ssc, iou_sum)
            val_cnt_class = np.add(val_cnt_class, cnt_class)
            # print('acc_w, acc, p, r, iou', acc_w, acc, p, r, iou)
    # average the scalar scores over the number of evaluated batches
    val_acc = val_acc / count
    val_p = val_p / count
    val_r = val_r / count
    val_iou = val_iou / count
    # per-class sums -> per-class IoU and mean IoU over occurring classes
    val_iou_ssc, val_iou_ssc_mean = sscMetrics.get_iou(val_iou_ssc, val_cnt_class)
    return val_p, val_r, val_iou, val_acc, val_iou_ssc, val_iou_ssc_mean
def validate_on_batch(predict, target, nonempty=None):
    """
    Score one batch on the CPU.

    predict: (bs, channels, D, H, W) one-hot scores
    target: (bs, channels, D, H, W) ground truth
    nonempty: optional mask of evaluable voxels
    Returns precision, recall and IoU of the binary completion task, pixel
    accuracy, and the per-class IoU sums and per-class counts.
    """
    # TODO: validation will increase the usage of GPU memory!!!
    prec, rec, comp_iou = sscMetrics.get_score_completion(predict, target, nonempty)
    acc, iou_sum, cnt_class, _tp, _fp, _fn = sscMetrics.get_score_semantic_and_completion(
        predict, target, nonempty)
    return prec, rec, comp_iou, acc, iou_sum, cnt_class
# static method
def adjust_learning_rate(optimizer, lr, epoch, n=10, rate=0.9):
    """Sets the learning rate to the initial LR decayed by rate=0.9 every n=10 epochs"""
    decayed = lr * rate ** (epoch // n)
    for group in optimizer.param_groups:
        group['lr'] = decayed
    # report only at decay boundaries
    if epoch % n == 0:
        print('Current learning rate is: {}'.format(decayed))
if __name__ == '__main__':
main()
| 10,137
| 39.552
| 142
|
py
|
SSC
|
SSC-master/sscMetrics.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
# from sklearn.metrics import precision_score, recall_score
"""
---- Input:
predict:
type, numpy.ndarray
shape, (BS=batch_size, C=class_num, W, H, D), onehot encoding
target:
type, numpy.ndarray
shape, (batch_size, W, H, D)
---- Return
iou, Intersection over Union
precision,
recall
"""
####################################################################################################
# Channel first, process in CPU with numpy
# (BS, C, W, H, D) or (BS, C, D, H, W)均可,注意predict和target两者统一即可
####################################################################################################
def get_iou(iou_sum, cnt_class):
    """
    Turn per-class IoU sums and per-class occurrence counts into per-class
    IoU and the mean IoU over the non-empty classes that actually occurred
    (class 0 = empty is excluded from the mean). Classes never seen in the
    ground truth get IoU 0, which avoids dividing by a zero count.
    """
    num_classes = iou_sum.shape[0]
    per_class = np.zeros(num_classes, dtype=np.float32)
    for cls in range(num_classes):
        if cnt_class[cls]:
            per_class[cls] = iou_sum[cls] / cnt_class[cls]
    # mean over the classes (excluding empty) with a non-zero count
    mean_iou = np.sum(per_class[1:]) / np.count_nonzero(cnt_class[1:])
    return per_class, mean_iou
def get_accuracy(predict, target, weight=None):  # 0.05s
    """
    Voxel-wise accuracy of one-hot predictions against integer targets.

    predict: (bs, C, ...) score volume; argmax'ed over the class axis.
    target: (bs, ...) integer labels; label 255 is weighted as class 0.
    weight: optional per-class weight sequence applied per ground-truth class.
    """
    batch = predict.shape[0]
    n_classes = predict.shape[1]  # 12 classes
    labels = np.int32(target).reshape(batch, -1)
    scores = predict.reshape(batch, n_classes, -1)
    # one-hot scores -> hard labels
    pred_labels = np.argmax(scores, axis=1)
    hits = (pred_labels == labels)
    if weight:  # 0.04s, add class weights
        # weight every voxel by its ground-truth class weight (255 -> class 0)
        voxel_w = np.ones(labels.shape)
        for b in range(batch):
            for v in range(labels.shape[1]):
                cls = 0 if labels[b, v] == 255 else labels[b, v]
                voxel_w[b, v] = weight[cls]
        hits = hits * voxel_w
    return hits.sum() / hits.size
def get_score_semantic_and_completion(predict, target, nonempty=None):
    """
    Per-class semantic scene-completion statistics for one batch.

    :param predict: (bs, C, ...) one-hot/score volume; argmax'ed over axis 1
    :param target: (bs, ...) integer labels; 255 marks voxels to ignore
    :param nonempty: optional (bs, ...) 0/1 mask of evaluable voxels
    :return: (acc, iou_sum, cnt_class, tp_sum, fp_sum, fn_sum)
        acc: batch-mean pixel accuracy,
        iou_sum / cnt_class: per-class IoU sums and occurrence counts,
        tp_sum / fp_sum / fn_sum: per-class tp/fp/fn (see NOTE below).
    """
    _bs = predict.shape[0]  # batch size
    _C = predict.shape[1]  # _C = 12
    # ---- one-hot: _bs x _C x 60*36*60 --> label: _bs x 60*36*60.
    predict = np.argmax(predict, axis=1)
    # ---- check empty: force masked-out voxels to the empty class
    if nonempty is not None:
        predict[nonempty == 0] = 0  # 0 empty
        nonempty = nonempty.reshape(_bs, -1)
    # ---- ignore
    # predict[target == 255] = 0
    # target[target == 255] = 0
    # ---- flatten
    target = target.reshape(_bs, -1)  # (_bs, 129600)
    predict = predict.reshape(_bs, -1)  # (_bs, 129600), 60*36*60=129600
    cnt_class = np.zeros(_C, dtype=np.int32)  # count for each class
    iou_sum = np.zeros(_C, dtype=np.float32)  # sum of iou for each class
    tp_sum = np.zeros(_C, dtype=np.int32)  # tp
    fp_sum = np.zeros(_C, dtype=np.int32)  # fp
    fn_sum = np.zeros(_C, dtype=np.int32)  # fn
    acc = 0.0
    for idx in range(_bs):
        y_true = target[idx, :]  # GT
        y_pred = predict[idx, :]
        # print('y_true.shape, y_pred.shape', y_true.shape, y_pred.shape)
        # y_pred = y_pred[y_true != 255]  # ---- ignore
        # y_true = y_true[y_true != 255]
        # print('y_true.shape, y_pred.shape', y_true.shape, y_pred.shape)
        if nonempty is not None:
            nonempty_idx = nonempty[idx, :]
            # y_pred = y_pred[nonempty_idx == 1]
            # y_true = y_true[nonempty_idx == 1]
            # keep only evaluable voxels whose label is not the 255 "ignore" marker
            y_pred = y_pred[np.where(np.logical_and(nonempty_idx == 1, y_true != 255))]
            y_true = y_true[np.where(np.logical_and(nonempty_idx == 1, y_true != 255))]
            # print('y_true.shape, y_pred.shape', y_true.shape, y_pred.shape)
        acc += accuracy_score(y_true, y_pred)  # pixel accuracy
        for j in range(_C):  # for each class
            tp = np.array(np.where(np.logical_and(y_true == j, y_pred == j))).size
            fp = np.array(np.where(np.logical_and(y_true != j, y_pred == j))).size
            fn = np.array(np.where(np.logical_and(y_true == j, y_pred != j))).size
            # a class only counts (and contributes IoU) when it occurs in GT
            u_j = np.array(np.where(y_true == j)).size
            cnt_class[j] += 1 if u_j else 0
            # iou = 1.0 * tp/(tp+fp+fn) if u_j else 0
            # iou_sum[j] += iou
            iou_sum[j] += 1.0*tp/(tp+fp+fn) if u_j else 0  # iou = tp/(tp+fp+fn)
            # NOTE(review): `=` (not `+=`) means tp/fp/fn_sum hold only the LAST
            # batch sample's counts while iou_sum/cnt_class accumulate; looks like
            # a bug, but no current caller consumes these three -- confirm intent.
            tp_sum[j] = tp
            fp_sum[j] = fp
            fn_sum[j] = fn
    acc = acc / _bs
    # return acc, iou_sum, cnt_class
    return acc, iou_sum, cnt_class, tp_sum, fp_sum, fn_sum
def get_score_completion(predict, target, nonempty=None):  # on both observed and occluded voxels
    """Evaluate scene completion as a binary (empty vs. occupied) problem.

    Args:
        predict: (bs, C, ...) per-class scores; argmax over axis 1 gives labels.
        target: (bs, ...) ground-truth labels; 255 marks voxels to ignore.
        nonempty: optional (bs, ...) 0/1 mask; 0-voxels are forced to empty
            and excluded from scoring.

    Returns:
        (precision, recall, IoU) of the occupied class, averaged over the batch.

    Note:
        Unlike the original implementation, the caller's `target` array is NOT
        mutated by the ignore-label masking, and the binary precision/recall
        are computed directly with numpy (matching sklearn's
        `average='binary'` convention of returning 0 on zero division).
    """
    _bs = predict.shape[0]  # batch size
    # ---- one-hot: _bs x _C x 60*36*60 --> label: _bs x 60*36*60
    # (np.argmax allocates a new array, so `predict` is safe to modify below)
    predict = np.argmax(predict, axis=1)
    # ---- copy target so the ignore-masking does not mutate the caller's array
    target = np.array(target)
    # ---- check empty
    if nonempty is not None:
        predict[nonempty == 0] = 0  # 0 empty
        nonempty = nonempty.reshape(_bs, -1)
    # ---- ignore: fold label-255 voxels into the empty class on both sides
    predict[target == 255] = 0
    target[target == 255] = 0
    # ---- flatten: (_bs, 60*36*60)
    target = target.reshape(_bs, -1)
    predict = predict.reshape(_bs, -1)
    # ---- treat all non-empty object classes as one category with label 1
    b_pred = np.zeros(predict.shape)
    b_true = np.zeros(target.shape)
    b_pred[predict > 0] = 1
    b_true[target > 0] = 1
    p, r, iou = 0.0, 0.0, 0.0
    for idx in range(_bs):
        y_true = b_true[idx, :]  # GT
        y_pred = b_pred[idx, :]
        if nonempty is not None:
            mask = nonempty[idx, :] == 1
            y_true = y_true[mask]
            y_pred = y_pred[mask]
        tp = np.count_nonzero(np.logical_and(y_true == 1, y_pred == 1))
        fp = np.count_nonzero(np.logical_and(y_true != 1, y_pred == 1))
        fn = np.count_nonzero(np.logical_and(y_true == 1, y_pred != 1))
        # precision/recall with the zero-division -> 0 convention
        _p = 1.0 * tp / (tp + fp) if (tp + fp) > 0 else 0.0
        _r = 1.0 * tp / (tp + fn) if (tp + fn) > 0 else 0.0
        # 1/iou = (tp+fp+fn)/tp = 1/p + 1/r - 1; tp > 0 whenever _p > 0
        _iou = 1.0 * tp / (tp + fp + fn) if _p else 0.0
        p += _p
        r += _r
        iou += _iou
    p = p / _bs
    r = r / _bs
    iou = iou / _bs
    return p, r, iou
| 6,687
| 40.283951
| 118
|
py
|
SSC
|
SSC-master/test.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
python script to evaluate the SSC model
---
Jie Li
jieli_cn@163.com
Nanjing University of Science and Technology
Aug 25, 2019
"""
import os
import torch
import argparse
import datetime
from dataloaders import make_data_loader
from models import make_model
from main import validate_on_dataset_stsdf
import config
# ---- command-line interface; `args` is consumed module-wide by main()/test()
parser = argparse.ArgumentParser(description='PyTorch SSC Training')
parser.add_argument('--dataset', type=str, default='nyu', choices=['nyu', 'nyucad', 'debug'],
                    help='dataset name (default: nyu)')
parser.add_argument('--model', type=str, default='ddrnet', choices=['ddrnet', 'aicnet', 'grfnet', 'palnet', 'lwddrnet'],
                    help='model name (default: palnet)')
parser.add_argument('--batch_size', default=4, type=int, metavar='N', help='mini-batch size (default: 4)')
parser.add_argument('--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--resume', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
# NOTE(review): `global` at module level is a no-op — `args` is already a module global.
global args
args = parser.parse_args()
def main():
    """Entry point: report CUDA availability, run evaluation, print wall time."""
    # ---- Check CUDA
    if torch.cuda.is_available():
        print("Great, You have {} CUDA device!".format(torch.cuda.device_count()))
    else:
        print("Sorry, You DO NOT have a CUDA device!")
    eval_time_start = datetime.datetime.now()
    test()
    # Fixed log message: this script evaluates a trained checkpoint; the
    # original said "Training finished", which was misleading.
    print('Evaluation finished in: {}'.format(datetime.datetime.now() - eval_time_start))
def test():
    """Build the network, restore the checkpoint given by --resume, and
    evaluate it on the validation split with the TSDF-based validator."""
    # ---- create model ---------- ---------- ---------- ---------- ----------#
    model = make_model(args.model, num_classes=12).cuda()
    # model = torch.nn.DataParallel(model)  # Multi-GPU
    # ---- load pretrained weights (fail fast when the checkpoint is missing)
    if not os.path.isfile(args.resume):
        raise Exception("=> NO checkpoint found at '{}'".format(args.resume))
    print("=> loading checkpoint '{}'".format(args.resume))
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'], strict=True)
    # ---- Data loader (the train split is not used here)
    _, val_loader = make_data_loader(args)
    torch.cuda.empty_cache()
    # ---- Evaluation
    v_prec, v_recall, v_iou, v_acc, v_ssc_iou, v_mean_iou = validate_on_dataset_stsdf(model, val_loader)
    print('Validate with TSDF:, p {:.1f}, r {:.1f}, IoU {:.1f}'.format(v_prec*100.0, v_recall*100.0, v_iou*100.0))
    print('pixel-acc {:.4f}, mean IoU {:.1f}, SSC IoU:{}'.format(v_acc*100.0, v_mean_iou*100.0, v_ssc_iou*100.0))
if __name__ == '__main__':
main()
| 2,574
| 33.333333
| 120
|
py
|
SSC
|
SSC-master/config.py
|
import numpy as np
import torch
class Path(object):
    """Lookup table for dataset root directories."""

    # dataset name -> {'train': root, 'val': root}
    _ROOTS = {
        'nyu': {'train': '/home/mcheem/data/datasets/NYU_SSC/NYUtrain_npz',
                'val': '/home/mcheem/data/datasets/NYU_SSC/NYUtest_npz'},
        'nyucad': {'train': '/home/jsg/jie/Data_zoo/NYU_SSC/NYUCADtrain_npz',
                   'val': '/home/jsg/jie/Data_zoo/NYU_SSC/NYUCADtest_npz'},
        # debug: tiny split, same data for train and val
        'debug': {'train': '/home/jsg/jie/Data_zoo/NYU_SSC/NYUCADval40_npz',
                  'val': '/home/jsg/jie/Data_zoo/NYU_SSC/NYUCADval40_npz'},
    }

    @staticmethod
    def db_root_dir(dataset):
        """Return {'train': path, 'val': path} for a known dataset name."""
        if dataset not in Path._ROOTS:
            print('Dataset {} not available.'.format(dataset))
            raise NotImplementedError
        return Path._ROOTS[dataset]
# ssc: color map — RGB triples indexed by class label, used for visualization
colorMap = np.array([[22, 191, 206],    # 0 empty, free space
                     [214,  38, 40],    # 1 ceiling
                     [43, 160, 4],      # 2 floor
                     [158, 216, 229],   # 3 wall
                     [114, 158, 206],   # 4 window
                     [204, 204, 91],    # 5 chair  new: 180, 220, 90
                     [255, 186, 119],   # 6 bed
                     [147, 102, 188],   # 7 sofa
                     [30, 119, 181],    # 8 table
                     [188, 188, 33],    # 9 tvs
                     [255, 127, 12],    # 10 furn
                     [196, 175, 214],   # 11 objects
                     [153, 153, 153],     # 12 Accessible area, or label==255, ignore
                     ]).astype(np.int32)
# ###########################################################################################
# Per-class cross-entropy weights; the empty class (index 0) is strongly
# down-weighted because free space dominates the voxel grid.
class_weights = torch.FloatTensor([0.05, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
| 1,795
| 36.416667
| 93
|
py
|
SSC
|
SSC-master/infer_ros.py
|
#!/usr/bin/env python3
from utils.seed import seed_torch
import os
# Network dependencies
import torch
import argparse
import numpy as np
from torch.autograd import Variable
# ROS dependencies
import rospy
from sensor_msgs.msg import Image
import tf.transformations as tr
import tf
from cv_bridge import CvBridge
# local imports
from models import make_model
from utils import utils
from ssc_msgs.msg import SSCGrid
class ROSInfer:
    """ROS node that runs semantic scene completion on incoming depth images.

    Subscribes to a depth-image topic, voxelizes each frame into a TSDF
    volume using the camera pose from TF, runs the 3D CNN, and publishes the
    per-voxel class labels as an SSCGrid message.
    """
    def __init__(self):
        # Parse CLI args / ROS params first; the network weights are loaded
        # later in start() -> load_network().
        self._load_arguments()
        self.net = make_model(self.args.model, num_classes=12)
        self.depth_cam_frame = self.args.depth_cam_frame
        self.world_frame = self.args.world_frame
        self.listener = tf.TransformListener()
        self.ssc_pub = rospy.Publisher('ssc', SSCGrid, queue_size=10)
        self.bridge = CvBridge()  # sensor_msgs/Image <-> numpy conversion
    def start(self):
        """
        Loads SSC Network model and start listening to depth images.
        """
        # load pretrained model
        self.load_network()
        # NOTE(review): the subscribed topic name equals the TF frame name —
        # confirm the depth image topic really matches `depth_cam_frame`.
        self.depth_img_subscriber = rospy.Subscriber(
            self.depth_cam_frame, Image, self.callback)
    def callback(self, depth_image):
        """
        Receive a Depth image from the simulation, voxelize the depthmap as TSDF, 2D to 3D mapping
        and perform inference using 3D CNN. Publish the results as SSCGrid Message.
        """
        # get depth camera pose wrt odom at the image timestamp
        try:
            position, orientation = self.listener.lookupTransform(
                self.world_frame, self.depth_cam_frame, depth_image.header.stamp)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            # TF not available for this stamp: drop the frame silently
            return
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        # parse depth image
        cv_image = self.bridge.imgmsg_to_cv2(
            depth_image, desired_encoding='passthrough')
        # prepare pose matrix: 4x4 homogeneous transform from quaternion + translation
        pose_matrix = tr.quaternion_matrix(orientation)
        pose_matrix[0:3, -1] = position
        # `position` is re-bound here: it becomes the 2D->3D projection index tensor
        vox_origin, rgb, depth, tsdf, position, occupancy_grid = self._load_data_from_depth_image(
            cv_image, pose_matrix)
        x_depth = Variable(depth.float()).to(self.device)
        position = position.long().to(self.device)
        if self.args.model == 'palnet':
            x_tsdf = Variable(tsdf.float()).to(self.device)
            y_pred = self.net(x_depth=x_depth, x_tsdf=x_tsdf, p=position)
        else:
            # NOTE(review): rgb is always None here (see _load_data_from_depth_image),
            # so non-palnet models would fail on x_rgb — confirm intended usage.
            x_rgb = Variable(rgb.float())
            y_pred = self.net(x_depth=x_depth, x_rgb=x_rgb, p=position)
        # per-voxel class probabilities -> hard labels
        scores = torch.nn.Softmax(dim=0)(y_pred.squeeze())
        preds = torch.argmax(scores, dim=0).cpu().numpy()
        # setup message: flattened label grid plus voxel-grid origin and extents
        msg = SSCGrid()
        msg.data = preds.reshape(-1).astype(np.float32).tolist()
        msg.origin_x = vox_origin[0]
        msg.origin_y = vox_origin[1]
        msg.origin_z = vox_origin[2]
        msg.frame = 'odom'
        msg.width = preds.shape[0]
        msg.height = preds.shape[1]
        msg.depth = preds.shape[2]
        # publish message
        self.ssc_pub.publish(msg)
    def _load_data_from_depth_image(self, depth, cam_pose, max_depth=8, cam_k=[[320, 0, 320], [0, 320, 240], [0, 0, 1]]):
        """
        Takes a depth map, pose as input and outputs the 3D voxel occupancy, 2D to 3D mapping and TSDF grid.
        """
        # cam_k presumably models a 640x480 pinhole camera (fx=fy=320,
        # cx=320, cy=240) — TODO confirm against the simulator settings.
        rgb = None  # RGB is not produced by this pipeline
        depth_npy = np.array(depth)
        # discard inf points (clamp far readings to the map minimum)
        depth_npy[depth_npy > max_depth] = depth_npy.min()
        # get voxel grid origin
        vox_origin = utils.get_origin_from_depth_image(
            depth_npy, cam_k, cam_pose)
        # compute tsdf for the voxel grid from depth camera
        vox_tsdf, depth_mapping_idxs, voxel_occupancy = utils.compute_tsdf(
            depth_npy, vox_origin, cam_k, cam_pose)
        return vox_origin, rgb, torch.as_tensor(depth_npy).unsqueeze(0).unsqueeze(0), torch.as_tensor(vox_tsdf).unsqueeze(0), torch.as_tensor(depth_mapping_idxs).unsqueeze(0).unsqueeze(0), torch.as_tensor(voxel_occupancy.transpose(2, 1, 0)).unsqueeze(0)
    def load_network(self):
        """
        Loads a pretrained model for inference
        """
        if os.path.isfile(self.args.resume):
            print("=> loading checkpoint '{}'".format(self.args.resume))
            cp_states = torch.load(self.args.resume, map_location=torch.device('cpu'))
            self.net.load_state_dict(cp_states['state_dict'], strict=True)
        else:
            raise Exception("=> NO checkpoint found at '{}'".format(self.args.resume))
        if torch.cuda.is_available():
            # NOTE(review): this format string has no '{}' placeholder, so the
            # device count is silently dropped.
            print("CUDA device found!".format(torch.cuda.device_count()))
            self.device = torch.device('cuda')
        else:
            print("Using CPU!")
            self.device = torch.device('cpu')
        self.net = self.net.to(self.device)
        # switch to test mode
        self.net.eval()
    def _load_arguments(self):
        """Parse CLI defaults, then let ROS private params override them."""
        parser = argparse.ArgumentParser(description='PyTorch SSC Inference')
        parser.add_argument('--depth_cam_frame', type=str, default='/airsim_drone/Depth_cam',
                            help='depth cam frame name (default: /airsim_drone/Depth_cam)')
        parser.add_argument('--world_frame', type=str, default='/odom',
                            help='world frame name (default: /odom)')
        parser.add_argument('--model', type=str, default='palnet', choices=['ddrnet', 'palnet'],
                            help='model name (default: palnet)')
        parser.add_argument('--resume', type=str, metavar='PATH',
                            help='path to latest checkpoint (default: none)')
        args = parser.parse_args()
        # use argparse arguments as default and override with ros params
        args.world_frame = rospy.get_param('~world_frame', args.world_frame)
        args.depth_cam_frame = rospy.get_param('~depth_cam_frame', args.depth_cam_frame)
        args.model = rospy.get_param('~model', args.model)
        args.resume = rospy.get_param('~resume', args.resume)
        self.args = args
if __name__ == '__main__':
rospy.init_node("scene_completion")
ri = ROSInfer()
ri.start()
rospy.spin()
| 6,176
| 35.550296
| 253
|
py
|
SSC
|
SSC-master/infer.py
|
from utils.seed import seed_torch
import os
import torch
import argparse
import numpy as np
from pathlib import Path
import imageio
import glob
from tqdm import tqdm
from torch.autograd import Variable
import datetime
from models import make_model
import config
import VoxelUtils as vu
from utils import utils
# ---- command-line interface; `args` is consumed module-wide by infer()
parser = argparse.ArgumentParser(description='PyTorch SSC Inference')
parser.add_argument('--dataset', type=str, default='nyu', choices=['nyu', 'nyucad', 'debug'],
                    help='dataset name (default: nyu)')
parser.add_argument('--model', type=str, default='palnet', choices=['ddrnet', 'aicnet', 'grfnet', 'palnet'],
                    help='model name (default: palnet)')
parser.add_argument('--files', default="/home/mcheem/data/datasets/large_room/", help='Depth Images')
parser.add_argument('--resume', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--save_completions', type=str, metavar='PATH', default="outputs", help='path to save completions (default: none)')
parser.add_argument('--model_name', default='SSC_debug', type=str, help='name of model to save check points')
# NOTE(review): `global` at module level is a no-op — `args` is already a module global.
global args
args = parser.parse_args()
def load_data_from_depth_image(filename, max_depth=8, cam_k=None):
    """
    Read depth and pose from a saved npz file and return tsdf voxels.

    Args:
        filename: path to a frame file; the '.npz' sidecar (same stem) holds
            a 'depth' array (HxW depth map) and a 'pose' camera pose matrix.
        max_depth: depth readings above this are clamped to the map minimum
            (discards inf/out-of-range points).
        cam_k: 3x3 camera intrinsics; defaults to the fx=fy=320, cx=320,
            cy=240 pinhole model used elsewhere in this repo.

    Returns:
        (rgb, depth, tsdf, mapping_idxs, occupancy) — rgb is always None;
        the tensors carry leading batch (and channel) dimensions.
    """
    # Default built inside the body to avoid a shared mutable default argument.
    if cam_k is None:
        cam_k = [[320, 0, 320], [0, 320, 240], [0, 0, 1]]
    rgb = None
    # `filename` normally already ends in '.npz' (from glob), so stripping
    # four characters and re-appending the suffix is a no-op in that case.
    frame_data = np.load(filename[:-4] + ".npz")
    depth_npy = frame_data["depth"]
    cam_pose = frame_data["pose"]
    depth_npy[depth_npy > max_depth] = depth_npy.min()
    vox_origin = utils.get_origin_from_depth_image(depth_npy, cam_k, cam_pose)
    vox_tsdf, depth_mapping_idxs, voxel_occupancy = utils.compute_tsdf(
        depth_npy, vox_origin, cam_k, cam_pose)
    return rgb, torch.as_tensor(depth_npy).unsqueeze(0).unsqueeze(0), torch.as_tensor(vox_tsdf).unsqueeze(0), torch.as_tensor(depth_mapping_idxs).unsqueeze(0).unsqueeze(0), torch.as_tensor(voxel_occupancy.transpose(2, 1, 0)).unsqueeze(0)
def infer():
    """
    Perform inference on saved depth data and save the results
    to the output directory specified in the arguments.
    """
    NUM_CLASSES = 12
    net = make_model(args.model, num_classes=NUM_CLASSES).cuda()
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            cp_states = torch.load(args.resume, map_location=torch.device('cpu'))
            net.load_state_dict(cp_states['state_dict'], strict=True)
        else:
            raise Exception("=> NO checkpoint found at '{}'".format(args.resume))
    # switch to eval mode
    net.eval()
    torch.cuda.empty_cache()
    # retrieve list of saved depth/pose array files
    file_list = glob.glob(str(Path(args.files) / "*.npz"))
    for step, depth_file in enumerate(file_list):
        rgb, depth, tsdf, position, occupancy_grid = load_data_from_depth_image(depth_file)
        x_depth = Variable(depth.float()).cuda()
        position = position.long().cuda()  # 2D->3D projection indices
        if args.model == 'palnet':
            x_tsdf = Variable(tsdf.float()).cuda()
            y_pred = net(x_depth=x_depth, x_tsdf=x_tsdf, p=position)
        else:
            # NOTE(review): rgb is always None from load_data_from_depth_image,
            # so non-palnet models would fail here — confirm intended usage.
            x_rgb = Variable(rgb.float())
            y_pred = net(x_depth=x_depth, x_rgb=x_rgb, p=position)
        # calculate per voxel class
        scores = torch.nn.Softmax(dim=0)(y_pred.squeeze())
        scores[0] += 0.3  # Increase offset of empty class to weed out low prob predictions
        preds = torch.argmax(scores, dim=0).cpu().numpy()
        # save completions: predicted labels plus the (downsampled) input scan
        if args.save_completions:
            utils.labeled_voxel2ply(preds,"{}/{}_preds.ply".format(args.save_completions, Path(depth_file).stem))
            occupancy_grid_downsampled = utils.downsample_voxel(occupancy_grid.squeeze().numpy())
            utils.labeled_voxel2ply(occupancy_grid_downsampled,"{}/{}_scan.ply".format(args.save_completions, Path(depth_file).stem))
def main():
    """Entry point: report the compute device, then run inference."""
    # ---- Check CUDA
    if torch.cuda.is_available():
        # Fixed: the original format string had no '{}' placeholder, so the
        # device count passed to .format() was silently dropped.
        print("CUDA device found! {} device(s) available.".format(torch.cuda.device_count()))
    else:
        print("Using CPU!")
    infer()
if __name__ == '__main__':
main()
| 4,225
| 37.072072
| 237
|
py
|
SSC
|
SSC-master/models/PALNet.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
PALNet
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from .projection_layer import Project2Dto3D
# ----------------------------------------------------------------------
# takes the depth and fTSDF as inputs
class SSC_PALNet(nn.Module):
    """PALNet for semantic scene completion.

    Two input streams: a depth image (2D convs, projected into a 3D voxel
    grid) and a flipped-TSDF volume (3D convs). Both are downsampled twice,
    concatenated at 1/4 resolution, refined by dilated residual blocks, and
    decoded to per-voxel class scores. forward() returns raw logits.
    """
    def __init__(self, num_classes=12):
        super(SSC_PALNet, self).__init__()
        print("SSC_PALNet")
        # ---- depth: shallow 2D feature extractor + one residual refinement
        depth_out = 6
        self.conv2d_depth = nn.Sequential(
            nn.Conv2d(1, depth_out, 3, 1, 1),
            nn.ReLU(inplace=True),
        )
        in_ch = depth_out // 2
        self.res_depth = nn.Sequential(
            nn.Conv2d(depth_out, in_ch, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, in_ch, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, depth_out, 1, 1, 0),
        )
        # lifts per-pixel 2D features into the full-resolution voxel grid
        self.project_layer = Project2Dto3D(240, 144, 240)  # w=240, h=144, d=240
        in_channel_3d = depth_out
        stride = 2
        self.pool1 = nn.Conv3d(in_channel_3d, 8, 7, stride, 3)  # downsample to 1/2
        self.reduction2_1 = nn.Conv3d(8, 16, 1, 1, 0, bias=False)  # shortcut channel match
        self.conv2_1 = nn.Sequential(
            nn.Conv3d(8, 8, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 8, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 16, 1, 1, 0)
        )
        # ---- flipped_tsdf: mirror branch on the 1-channel TSDF volume
        in_channel_3d = 1
        stride = 2
        self.pool2 = nn.Conv3d(in_channel_3d, 8, 7, stride, 3)
        self.reduction2_2 = nn.Conv3d(8, 16, 1, 1, 0, bias=False)
        self.conv2_2 = nn.Sequential(
            nn.Conv3d(8, 8, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 8, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 16, 1, 1, 0)
        )
        # ---- second downsampling (to 1/4) for the depth branch
        stride = 2
        self.reduction3_1 = nn.Conv3d(16, 32, 1, stride, 0, bias=False)
        self.conv3_1 = nn.Sequential(
            nn.Conv3d(16, 8, 1, stride, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 8, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 32, 1, 1, 0),
        )
        # ---- second downsampling (to 1/4) for the TSDF branch
        stride = 2
        self.reduction3_2 = nn.Conv3d(16, 32, 1, stride, 0, bias=False)
        self.conv3_2 = nn.Sequential(
            nn.Conv3d(16, 8, 1, stride, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 8, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 32, 1, 1, 0),
        )
        # -------------1/4 resolution residual blocks (conv3_5/conv3_7 use dilation 2)
        self.conv3_3 = nn.Sequential(
            nn.Conv3d(64, 32, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 64, 1, 1, 0),
        )
        self.conv3_5 = nn.Sequential(
            nn.Conv3d(64, 32, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 32, 3, 1, 2, 2),
            nn.Conv3d(32, 32, 3, 1, 2, 2),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 64, 1, 1, 0),
        )
        self.conv3_7 = nn.Sequential(
            nn.Conv3d(64, 32, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 32, 3, 1, 2, 2),
            nn.Conv3d(32, 32, 3, 1, 2, 2),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 64, 1, 1, 0),
        )
        # ---- output head over the 4-stage concatenation (4 * 64 = 256 channels)
        self.conv4_1 = nn.Conv3d(256, 128, 1, 1, 0)
        self.relu4_1 = nn.ReLU(inplace=True)  # NOTE(review): unused — forward calls F.relu instead
        self.conv4_2 = nn.Conv3d(128, 128, 1, 1, 0)
        self.relu4_2 = nn.ReLU(inplace=True)  # NOTE(review): unused — forward calls F.relu instead
        self.fc12 = nn.Conv3d(128, num_classes, 1, 1, 0)  # C_NUM = 12, number of classes is 12
        self.softmax = nn.Softmax(dim=1)  # pytorch 0.3.0  NOTE(review): unused — forward returns logits
        # self.logsoftmax = nn.LogSoftmax(dim=1)
        # ---- weights init
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.xavier_uniform_(m.weight.data)  # gain=1
                # nn.init.constant(m.bias.data, 0)
        nn.init.normal_(self.conv4_1.weight.data, mean=0, std=0.1)
        nn.init.normal_(self.conv4_2.weight.data, mean=0, std=0.01)
        nn.init.normal_(self.fc12.weight.data, mean=0, std=0.01)
    def forward(self, x_depth, x_tsdf, p):
        """x_depth: depth image batch; x_tsdf: TSDF volumes; p: 2D->3D projection indices."""
        # ---- depth branch: 2D features -> voxel grid -> 1/2 resolution
        x0_depth = self.conv2d_depth(x_depth)
        x0_depth = F.relu(self.res_depth(x0_depth) + x0_depth, inplace=True)
        x0_depth = self.project_layer(x0_depth, p)
        x1_depth = self.pool1(x0_depth)
        x1_depth = F.relu(x1_depth, inplace=True)
        x2_1_depth = self.reduction2_1(x1_depth)  # (BS, 32L, 120L, 72L, 120L)
        x2_2_depth = self.conv2_1(x1_depth)
        x2_depth = x2_1_depth + x2_2_depth
        x2_depth = F.relu(x2_depth, inplace=True)
        # ---- TSDF branch: add the channel dim, then 1/2 resolution
        x_tsdf_s = torch.unsqueeze(x_tsdf, 0).permute(1,0,2,3,4)
        x1_tsdf = self.pool2(x_tsdf_s)  # (BS, 16L, 120L, 72L, 120L)
        x1_tsdf = F.relu(x1_tsdf, inplace=True)
        x2_1_tsdf = self.reduction2_2(x1_tsdf)  # (BS, 32L, 120L, 72L, 120L)
        x2_2_tsdf = self.conv2_2(x1_tsdf)
        x2_tsdf = x2_1_tsdf + x2_2_tsdf
        x2_tsdf = F.relu(x2_tsdf, inplace=True)
        # ---- downsample both branches to 1/4 resolution
        x3_1_depth = self.reduction3_1(x2_depth)  # (BS, 64L, 60L, 36L, 60L)
        x3_2_depth = self.conv3_1(x2_depth)
        x_3_depth = x3_1_depth + x3_2_depth
        x_3_depth = F.relu(x_3_depth, inplace=True)
        x3_1_tsdf = self.reduction3_2(x2_tsdf)  # (BS, 64L, 60L, 36L, 60L)
        x3_2_tsdf = self.conv3_2(x2_tsdf)
        x_3_tsdf = x3_1_tsdf + x3_2_tsdf
        x_3_tsdf = F.relu(x_3_tsdf, inplace=True)
        # ---- fuse the branches along channels
        x_3 = torch.cat((x_3_depth, x_3_tsdf), dim=1)
        # ---- 1/4 resolution residual refinement
        x_4 = self.conv3_3(x_3) + x_3
        x_4 = F.relu(x_4, inplace=True)
        x_5 = self.conv3_5(x_4) + x_4
        x_5 = F.relu(x_5, inplace=True)
        x_6 = self.conv3_7(x_5) + x_5
        x_6 = F.relu(x_6, inplace=True)
        x_6 = torch.cat((x_3, x_4, x_5, x_6), dim=1)  # channels concatenate
        # ---- output head
        x_6 = self.conv4_1(x_6)  # (BS, 128L, 60L, 36L, 60L)
        x_6 = F.relu(x_6, inplace=True)
        x_6 = self.conv4_2(x_6)  # (BS, 128L, 60L, 36L, 60L)
        x_6 = F.relu(x_6, inplace=True)
        y = self.fc12(x_6)  # (BS, 12L, 60L, 36L, 60L) raw logits
        return y
| 6,635
| 32.346734
| 95
|
py
|
SSC
|
SSC-master/models/DDR.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
DDR
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
# ----------------------------------------------------------------------
class BasicDDR2d(nn.Module):
    """2D DDR block: a 1xk conv then a kx1 conv, with an optional residual add."""
    def __init__(self, c, k=3, dilation=1, residual=True):
        super(BasicDDR2d, self).__init__()
        pad = k // 2 * dilation  # 'same' padding for odd k
        self.conv_1xk = nn.Conv2d(c, c, (1, k), stride=1, padding=(0, pad), bias=True, dilation=(1, dilation))
        self.conv_kx1 = nn.Conv2d(c, c, (k, 1), stride=1, padding=(pad, 0), bias=True, dilation=(dilation, 1))
        self.residual = residual
    def forward(self, x):
        out = F.relu(self.conv_1xk(x), inplace=True)
        out = self.conv_kx1(out)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
# ----------------------------------------------------------------------
class BasicDDR3d(nn.Module):
    """3D DDR block: factorized 1x1xk / 1xkx1 / kx1x1 convs, optional residual.

    With stride != 1 and residual=True the residual add would shape-mismatch;
    callers use stride=1 for residual blocks.
    """
    def __init__(self, c, k=3, dilation=1, stride=1, residual=True):
        super(BasicDDR3d, self).__init__()
        d = dilation
        s = stride
        pad = k // 2 * d  # 'same' padding for odd k
        self.conv_1x1xk = nn.Conv3d(c, c, (1, 1, k), stride=(1, 1, s), padding=(0, 0, pad), bias=True, dilation=(1, 1, d))
        self.conv_1xkx1 = nn.Conv3d(c, c, (1, k, 1), stride=(1, s, 1), padding=(0, pad, 0), bias=True, dilation=(1, d, 1))
        self.conv_kx1x1 = nn.Conv3d(c, c, (k, 1, 1), stride=(s, 1, 1), padding=(pad, 0, 0), bias=True, dilation=(d, 1, 1))
        self.residual = residual
    def forward(self, x):
        out = F.relu(self.conv_1x1xk(x), inplace=True)
        out = F.relu(self.conv_1xkx1(out), inplace=True)
        out = self.conv_kx1x1(out)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
class BottleneckDDR2d(nn.Module):
    """2D bottleneck DDR block: 1x1 reduce -> 1xk -> kx1 -> 1x1 expand,
    with an optional residual add (requires c_in == c_out and stride 1)."""
    def __init__(self, c_in, c, c_out, kernel=3, stride=1, dilation=1, residual=True):
        super(BottleneckDDR2d, self).__init__()
        pad = kernel // 2 * dilation  # 'same' padding for odd kernel
        self.conv_in = nn.Conv2d(c_in, c, kernel_size=1, bias=False)
        self.conv_1xk = nn.Conv2d(c, c, (1, kernel), stride=stride, padding=(0, pad), bias=True, dilation=(1, dilation))
        self.conv_kx1 = nn.Conv2d(c, c, (kernel, 1), stride=stride, padding=(pad, 0), bias=True, dilation=(dilation, 1))
        self.conv_out = nn.Conv2d(c, c_out, kernel_size=1, bias=False)
        self.residual = residual
    def forward(self, x):
        out = F.relu(self.conv_in(x), inplace=True)
        out = F.relu(self.conv_1xk(out), inplace=True)
        out = F.relu(self.conv_kx1(out), inplace=True)
        out = self.conv_out(out)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
class BottleneckDDR3d(nn.Module):
    """3D bottleneck DDR block with dense inner shortcuts:
    1x1 reduce -> 1x1xk -> (1xkx1 + skip) -> (kx1x1 + skips) -> 1x1 expand,
    plus an optional outer residual add (requires c_in == c_out, stride 1)."""
    def __init__(self, c_in, c, c_out, kernel=3, stride=1, dilation=1, residual=True):
        super(BottleneckDDR3d, self).__init__()
        pad = kernel // 2 * dilation  # 'same' padding for odd kernel
        self.conv_in = nn.Conv3d(c_in, c, kernel_size=1, bias=False)
        self.conv1x1x3 = nn.Conv3d(c, c, (1, 1, kernel), stride=stride, padding=(0, 0, pad), bias=True, dilation=(1, 1, dilation))
        self.conv1x3x1 = nn.Conv3d(c, c, (1, kernel, 1), stride=stride, padding=(0, pad, 0), bias=True, dilation=(1, dilation, 1))
        self.conv3x1x1 = nn.Conv3d(c, c, (kernel, 1, 1), stride=stride, padding=(pad, 0, 0), bias=True, dilation=(dilation, 1, 1))
        self.conv_out = nn.Conv3d(c, c_out, kernel_size=1, bias=False)
        self.residual = residual
    def forward(self, x):
        reduced = F.relu(self.conv_in(x), inplace=True)
        a = F.relu(self.conv1x1x3(reduced), inplace=True)
        # each later axis sums the outputs of the earlier axes (dense shortcuts)
        b = F.relu(self.conv1x3x1(a) + a, inplace=True)
        c = F.relu(self.conv3x1x1(b) + b + a, inplace=True)
        out = self.conv_out(c)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
class DownsampleBlock3d(nn.Module):
    """Halve the spatial size by concatenating a strided conv (c_out - c_in
    channels) with a 2x2x2 max-pool of the input (c_in channels).

    Spatial dims must be even for the two paths to line up.
    """
    def __init__(self, c_in, c_out, k=3, s=2, p=1):
        super(DownsampleBlock3d, self).__init__()
        self.conv = nn.Conv3d(c_in, c_out - c_in, kernel_size=k, stride=s, padding=p, bias=False)
        self.pool = nn.MaxPool3d(2, stride=2)
    def forward(self, x):
        merged = torch.cat([self.conv(x), self.pool(x)], 1)
        return F.relu(merged, inplace=True)
class DDR_ASPP3d(nn.Module):
    """ASPP head built from DDR bottlenecks: a 1x1 branch, three dilated
    branches (rates 6/12/18), and a global-average branch, concatenated
    along channels (output has 5 * c_out channels)."""
    def __init__(self, c_in, c, c_out, residual=False):
        super(DDR_ASPP3d, self).__init__()
        print('DDR_ASPP3d: c_in:{}, c:{}, c_out:{}'.format(c_in, c, c_out))
        self.aspp0 = nn.Conv3d(c_in, c_out, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)
        self.aspp1 = BottleneckDDR3d(c_in, c, c_out, dilation=6, residual=residual)
        self.aspp2 = BottleneckDDR3d(c_in, c, c_out, dilation=12, residual=residual)
        self.aspp3 = BottleneckDDR3d(c_in, c, c_out, dilation=18, residual=residual)
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)),
                                             nn.Conv3d(c_in, c_out, 1, stride=1, bias=False))
    def forward(self, x):
        branches = [self.aspp0(x), self.aspp1(x), self.aspp2(x), self.aspp3(x)]
        # global branch: pooled to 1x1x1, then upsampled back to the input size
        pooled = self.global_avg_pool(x)
        pooled = F.interpolate(pooled, size=x.size()[2:], mode='trilinear', align_corners=True)
        branches.append(pooled)
        return torch.cat(branches, dim=1)
| 5,848
| 36.254777
| 120
|
py
|
SSC
|
SSC-master/models/projection_layer.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Project feature tensers of 2D image to 3D space
jieli_cn@163.com
"""
import torch.nn as nn
from torch_scatter import scatter_max
class Project2Dto3D(nn.Module):
    """Scatter 2D feature maps into a 3D voxel volume.

    `idx` gives, per pixel, the flat index of the voxel it projects to; pixels
    landing in the same voxel are reduced with an element-wise max.
    """
    def __init__(self, w=240, h=144, d=240):
        super(Project2Dto3D, self).__init__()
        self.w = w
        self.h = h
        self.d = d
    def forward(self, x2d, idx):
        bs, c = x2d.shape[0], x2d.shape[1]
        flat_src = x2d.view(bs, c, -1)
        # one voxel index per pixel, broadcast across all c channels
        flat_idx = idx.view(bs, 1, -1).expand(-1, c, -1)
        volume = x2d.new_zeros((bs, c, self.w * self.h * self.d))
        volume, _ = scatter_max(flat_src, flat_idx, out=volume)
        volume = volume.view(bs, c, self.w, self.h, self.d)  # (BS, c, vW, vH, vD)
        return volume.permute(0, 1, 4, 3, 2)  # -> (BS, c, vD, vH, vW)
| 922
| 27.84375
| 89
|
py
|
SSC
|
SSC-master/models/AICNet.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
AICNet
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from .projection_layer import Project2Dto3D
from .DDR import BottleneckDDR2d, BottleneckDDR3d, DownsampleBlock3d
class BasicAIC3d(nn.Module):
    """Anisotropic-convolution (AIC) 3D block.

    Runs each spatial axis through a bank of `n` factorized 1-D convs with
    different kernel sizes, and blends the bank outputs with per-voxel
    softmax weights predicted by `conv_mx`. The three axes are processed
    sequentially: last dim ("x"), then middle ("y"), then first ("z").
    """
    def __init__(self, channel, kernel=(3, 5, 7), dilation=(1, 1, 1), residual=True):
        super(BasicAIC3d, self).__init__()
        self.channel = channel
        self.residual = residual
        self.n = len(kernel)  # number of kernels
        # predicts 3*n attention maps: one per (axis, kernel) pair
        self.conv_mx = nn.Conv3d(channel, 3 * self.n, (1, 1, 1), stride=1, padding=0, bias=False, dilation=1)
        self.softmax = nn.Softmax(dim=2)  # softmax over the n kernel choices (after the view in forward)
        # ---- Convs of each axis
        self.conv_1x1xk = nn.ModuleList()
        self.conv_1xkx1 = nn.ModuleList()
        self.conv_kx1x1 = nn.ModuleList()
        c = channel
        for _idx in range(self.n):
            k = kernel[_idx]
            d = dilation[_idx]
            p = k // 2 * d  # 'same' padding for odd k
            self.conv_1x1xk.append(nn.Conv3d(c, c, (1, 1, k), stride=1, padding=(0, 0, p), bias=True, dilation=(1, 1, d)))
            self.conv_1xkx1.append(nn.Conv3d(c, c, (1, k, 1), stride=1, padding=(0, p, 0), bias=True, dilation=(1, d, 1)))
            self.conv_kx1x1.append(nn.Conv3d(c, c, (k, 1, 1), stride=1, padding=(p, 0, 0), bias=True, dilation=(d, 1, 1)))
    def forward(self, x):
        mx = self.conv_mx(x)  # (BS, 3n, D, H, W)
        _bs, _tn, _d, _h, _w = mx.size()
        mx = mx.view(_bs, 3, -1, _d, _h, _w)  # (BS, 3, n, D, H, W)
        mx = self.softmax(mx)  # normalize the kernel-selection weights (dim=2)
        mx_c = torch.unsqueeze(mx, dim=3)  # (BS, 3, n, 1, D, H, W)
        mx_c = mx_c.expand(-1, -1, -1, self.channel, -1, -1, -1)  # (BS, 3, n, c, D, H, W)
        mx_list = torch.split(mx_c, 1, dim=2)  # n x (BS, 3, 1, c, D, H, W)
        mx_z_list = []
        mx_y_list = []
        mx_x_list = []
        for i in range(self.n):
            # split the 3 per-axis weight maps for kernel i
            mx_z, mx_y, mx_x = torch.split(torch.squeeze(mx_list[i], dim=2), 1, dim=1)  # 3 x (BS, 1, c, D, H, W)
            mx_z_list.append(torch.squeeze(mx_z, dim=1))  # (BS, c, D, H, W)
            mx_y_list.append(torch.squeeze(mx_y, dim=1))  # (BS, c, D, H, W)
            mx_x_list.append(torch.squeeze(mx_x, dim=1))  # (BS, c, D, H, W)
        # ------ x ------ weighted sum of the kernel bank along the last dim
        y_x = None
        for _idx in range(self.n):
            y1_x = self.conv_1x1xk[_idx](x)
            y1_x = F.relu(y1_x, inplace=True)
            y1_x = torch.mul(mx_x_list[_idx], y1_x)
            y_x = y1_x if y_x is None else y_x + y1_x
        # ------ y ------ consumes the x-axis result
        y_y = None
        for _idx in range(self.n):
            y1_y = self.conv_1xkx1[_idx](y_x)
            y1_y = F.relu(y1_y, inplace=True)
            y1_y = torch.mul(mx_y_list[_idx], y1_y)
            y_y = y1_y if y_y is None else y_y + y1_y
        # ------ z ------ consumes the y-axis result
        y_z = None
        for _idx in range(self.n):
            y1_z = self.conv_kx1x1[_idx](y_y)
            y1_z = F.relu(y1_z, inplace=True)
            y1_z = torch.mul(mx_z_list[_idx], y1_z)
            y_z = y1_z if y_z is None else y_z + y1_z
        y = F.relu(y_z + x, inplace=True) if self.residual else F.relu(y_z, inplace=True)
        return y
class BottleneckAIC3d(nn.Module):
    """AIC bottleneck: 1x1 reduce -> BasicAIC3d -> 1x1 expand, with an
    optional outer residual add (requires c_in == c_out)."""
    def __init__(self, c_in, c, c_out, kernel=(3, 5, 7), dilation=(1, 1, 1), residual=True, neighbours=0, pooling_kernel=0):
        # NOTE: `neighbours` and `pooling_kernel` are accepted but unused;
        # they are kept for signature compatibility with existing callers.
        super(BottleneckAIC3d, self).__init__()
        self.residual = residual
        self.conv_in = nn.Conv3d(c_in, c, kernel_size=1, bias=False)
        self.basic_aic = BasicAIC3d(c, kernel=kernel, dilation=dilation, residual=True)
        self.conv_out = nn.Conv3d(c, c_out, kernel_size=1, bias=False)
    def forward(self, x):
        out = F.relu(self.conv_in(x), inplace=True)
        out = self.basic_aic(out)
        out = self.conv_out(out)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
class SSC_RGBD_AICNet(nn.Module):
    """Two-stream (depth + RGB) semantic scene completion network built from
    AIC bottleneck blocks. Each stream is processed independently; the
    streams are fused by element-wise addition at four depths, the fused
    features are concatenated, refined, and decoded to per-voxel logits."""
    def __init__(self, num_classes=12):
        super(SSC_RGBD_AICNet, self).__init__()
        print('SSC_RGBD_AICNet.')
        w, h, d = 240, 144, 240
        k = ((3, 5, 7), (3, 5, 7), (3, 5, 7))
        ks = (3, 5, 7)
        # --- depth: 2D feature extraction -> 3D projection -> 2x downsample twice
        c_in, c, c_out, dilation, residual = 1, 4, 8, 1, True
        self.dep_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_dep = Project2Dto3D(w, h, d)  # w=240, h=144, d=240
        self.dep_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=8, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # --- RGB: same layout as the depth stream but with 3 input channels
        c_in, c, c_out, dilation, residual = 3, 4, 8, 1, True
        self.rgb_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_rgb = Project2Dto3D(w, h, d)  # w=240, h=144, d=240
        self.rgb_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=8, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        ck = 64
        c = int(ck / 2)
        dilation = ((1, 1, 1), (1, 1, 1), (1, 1, 1))
        # ---- depth stream: three AIC refinement stages
        self.res3d_1d = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[0], dilation=dilation[0], residual=True)
        self.res3d_2d = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[1], dilation=dilation[1], residual=True)
        self.res3d_3d = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[2], dilation=dilation[2], residual=True)
        # ---- rgb stream: mirror of the depth stages
        self.res3d_1r = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[0], dilation=dilation[0], residual=True)
        self.res3d_2r = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[1], dilation=dilation[1], residual=True)
        self.res3d_3r = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[2], dilation=dilation[2], residual=True)
        d = (1, 1, 1)
        # ---- refinement over the 4-stage concatenation (4 * ck channels)
        self.aspp_1 = BottleneckAIC3d(c_in=int(ck * 4), c=ck, c_out=int(ck * 4), kernel=ks, dilation=d, residual=True)
        self.aspp_2 = BottleneckAIC3d(c_in=int(ck * 4), c=ck, c_out=int(ck * 4), kernel=ks, dilation=d, residual=True)
        self.conv_out = nn.Sequential(
            nn.Conv3d(int(ck * 4), 128, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, 128, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, num_classes, 1, 1, 0)
        )
        # ---- weights init
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.xavier_uniform_(m.weight.data)  # gain=1
                # nn.init.constant(m.bias.data, 0)
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, mean=0, std=0.1)
    def forward(self, x_depth=None, x_rgb=None, p=None):
        """x_depth/x_rgb: 2D input images; p: 2D->3D projection indices."""
        f0_r = self.rgb_feature2d(x_rgb)
        f0_r = self.project_layer_rgb(f0_r, p)
        f0_r = self.rgb_feature3d(f0_r)
        f0_d = self.dep_feature2d(x_depth)
        f0_d = self.project_layer_dep(f0_d, p)
        f0_d = self.dep_feature3d(f0_d)
        # ---- fuse the two streams at each depth by element-wise addition
        f0 = torch.add(f0_d, f0_r)
        f1_d = self.res3d_1d(f0_d)
        f1_r = self.res3d_1r(f0_r)
        f1 = torch.add(f1_d, f1_r)
        f2_d = self.res3d_2d(f1_d)
        f2_r = self.res3d_2r(f1_r)
        f2 = torch.add(f2_d, f2_r)
        f3_d = self.res3d_3d(f2_d)
        f3_r = self.res3d_3r(f2_r)
        f3 = torch.add(f3_d, f3_r)
        y = torch.cat((f0, f1, f2, f3), dim=1)  # channels concatenate
        y = self.aspp_1(y)
        y = self.aspp_2(y)
        y = self.conv_out(y)  # (BS, 12L, 60L, 36L, 60L) raw logits
        return y
| 8,927
| 40.142857
| 124
|
py
|
SSC
|
SSC-master/models/GRFNet.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
GRFNet
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from .projection_layer import Project2Dto3D
from .DDR import DDR_ASPP3d
from .DDR import BottleneckDDR2d, BottleneckDDR3d, DownsampleBlock3d
class Conv3dGRUCell(nn.Module):
    """A convolutional GRU cell for 5-D tensors (B, C, D, H, W).

    Both gate convolutions operate on the channel-wise concatenation of
    the input and the current hidden state; padding keeps spatial size.
    """

    def __init__(self, input_channels, hidden_channels, kernel_size, bias=True):
        super(Conv3dGRUCell, self).__init__()
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        # 'same' padding for odd kernel sizes.
        self.padding = kernel_size // 2
        self.bias = bias
        # One convolution produces both the reset and update gates.
        self.in_conv = nn.Conv3d(
            in_channels=self.input_channels + self.hidden_channels,
            out_channels=2 * self.hidden_channels,
            kernel_size=self.kernel_size,
            stride=1,
            dilation=1,
            padding=self.padding,
            bias=self.bias,
        )
        # A second convolution produces the candidate hidden state.
        self.out_conv = nn.Conv3d(
            in_channels=self.input_channels + self.hidden_channels,
            out_channels=self.hidden_channels,
            kernel_size=self.kernel_size,
            stride=1,
            dilation=1,
            padding=self.padding,
            bias=self.bias,
        )

    def forward(self, input_tensor, hidden_state):
        """Run one GRU step and return the next hidden state."""
        gates = self.in_conv(torch.cat((input_tensor, hidden_state), dim=1))
        cc_reset, cc_update = torch.split(gates, self.hidden_channels, dim=1)
        reset = torch.sigmoid(cc_reset)
        update = torch.sigmoid(cc_update)
        candidate = torch.tanh(
            self.out_conv(torch.cat((input_tensor, hidden_state * reset), dim=1)))
        # Note: 'update' weights the previous state (not the candidate),
        # matching the original formulation in this file.
        return update * hidden_state + (1 - update) * candidate
class SSC_RGBD_GRFNet(nn.Module):
    """Two-stream (RGB + depth) semantic scene completion network that
    fuses the streams with a single shared Conv3dGRUCell applied over
    four fusion stages (gated recurrent fusion)."""

    def __init__(self, num_classes=12):
        super(SSC_RGBD_GRFNet, self).__init__()
        print('SSC_RGBD_GRFNet.')
        # --- depth stream: 1-channel input, 2D DDR bottlenecks.
        c_in, c, c_out, dilation, residual = 1, 4, 8, 1, True
        self.dep_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # 1x1 channel reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_dep = Project2Dto3D(240, 144, 240)  # w=240, h=144, d=240
        self.dep_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=8, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # --- RGB stream: 3-channel input, same structure as depth.
        c_in, c, c_out, dilation, residual = 3, 4, 8, 1, True
        self.rgb_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # 1x1 channel reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_rgb = Project2Dto3D(240, 144, 240)  # w=240, h=144, d=240
        self.rgb_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=8, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # ------------- 1/4-resolution residual stages, dilation 2/3/5.
        ck = 64
        c = ck // 4
        # --- RGB
        self.res3d_1r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=2, residual=True)
        self.res3d_2r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=3, residual=True)
        self.res3d_3r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=5, residual=True)
        # --- Depth
        self.res3d_1d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=2, residual=True)
        self.res3d_2d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=3, residual=True)
        self.res3d_3d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=5, residual=True)
        # self.lstm = DDRConv3dLSTMCell(input_channels=128, hidden_channels=64, kernel_size=(3, 3, 3), bias=True)
        # One GRU cell shared by all fusion stages below.
        self.gru = Conv3dGRUCell(input_channels=64, hidden_channels=64, kernel_size=3, bias=True)
        self.aspp = DDR_ASPP3d(c_in=ck, c=16, c_out=64)
        # ASPP is assumed to emit 320 channels (5 branches x 64) -- TODO confirm against DDR_ASPP3d.
        self.conv_out = nn.Sequential(
            nn.Conv3d(320, 160, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(160, num_classes, 1, 1, 0)
        )
        # ---- weights init
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                # nn.init.xavier_normal(m.weight.data, gain=math.sqrt(2. / n))
                # nn.init.xavier_uniform(m.weight.data, gain=math.sqrt(2. / n))
                nn.init.xavier_uniform_(m.weight.data)  # gain=1
                # nn.init.constant(m.bias.data, 0)
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, mean=0, std=0.1)

    def forward(self, x_depth=None, x_rgb=None, p=None):
        """Return per-voxel class scores; p holds the 2D->3D projection indices."""
        # input: x (BS, 3L, 240L, 144L, 240L)
        # print('SSC: x.shape', x.shape)
        if x_rgb is not None:
            x0_rgb = self.rgb_feature2d(x_rgb)
            x0_rgb = self.project_layer_rgb(x0_rgb, p)
            x0_rgb = self.rgb_feature3d(x0_rgb)
            # pass
        if x_depth is not None:
            x0_depth = self.dep_feature2d(x_depth)
            x0_depth = self.project_layer_dep(x0_depth, p)
            x0_depth = self.dep_feature3d(x0_depth)
        # NOTE(review): if either input is None, the code below raises
        # NameError (x0_rgb / x0_depth undefined) -- both inputs required.
        # -------------------------------------------------------------------
        # ---- 1/4 resolution residual stages, per stream.
        x_4_d = self.res3d_1d(x0_depth)
        x_4_r = self.res3d_1r(x0_rgb)
        # f1 = torch.add(x_4_d, x_4_r)
        x_5_d = self.res3d_2d(x_4_d)
        x_5_r = self.res3d_2r(x_4_r)
        # f2 = torch.add(x_5_d, x_5_r)
        x_6_d = self.res3d_3d(x_5_d)
        x_6_r = self.res3d_3r(x_5_r)
        # f3 = torch.add(x_6_d, x_6_r)
        # NOTE(review): this adds depth to itself (2 * x0_depth); the sibling
        # nets fuse depth + rgb at this level -- confirm this is intended.
        h0 = torch.add(x0_depth, x0_depth)
        # Fusion stage: 1 -- feed depth then rgb features through the shared GRU.
        h1_1 = self.gru(input_tensor=x0_depth, hidden_state=h0)
        h1 = self.gru(input_tensor=x0_rgb, hidden_state=h1_1)
        # Fusion stage: 2
        h2_1 = self.gru(input_tensor=x_4_d, hidden_state=h1)
        h2 = self.gru(input_tensor=x_4_r, hidden_state=h2_1)
        # Fusion stage: 3
        h3_1 = self.gru(input_tensor=x_5_d, hidden_state=h2)
        h3 = self.gru(input_tensor=x_5_r, hidden_state=h3_1)
        # Fusion stage: 4
        h4_1 = self.gru(input_tensor=x_6_d, hidden_state=h3)
        h4 = self.gru(input_tensor=x_6_r, hidden_state=h4_1)
        y = self.aspp(h4)
        y = self.conv_out(y)  # (BS, 12L, 60L, 36L, 60L)
        return y
| 7,381
| 39.119565
| 113
|
py
|
SSC
|
SSC-master/models/__init__.py
|
from .PALNet import SSC_PALNet
from .DDRNet import SSC_RGBD_DDRNet
from .AICNet import SSC_RGBD_AICNet
from .GRFNet import SSC_RGBD_GRFNet
def make_model(modelname, num_classes):
    """Factory for SSC models.

    Parameters
    ----------
    modelname : str
        One of 'palnet', 'ddrnet', 'aicnet', 'grfnet'.
    num_classes : int
        Number of semantic classes predicted per voxel.

    Returns
    -------
    torch.nn.Module
        The requested, freshly constructed network.

    Raises
    ------
    ValueError
        If `modelname` is not recognized.  (Previously an unknown name
        silently returned None, which only failed later at the call site.)
    """
    if modelname == 'palnet':
        return SSC_PALNet(num_classes)
    if modelname == 'ddrnet':
        return SSC_RGBD_DDRNet(num_classes)
    if modelname == 'aicnet':
        return SSC_RGBD_AICNet(num_classes)
    if modelname == 'grfnet':
        return SSC_RGBD_GRFNet(num_classes)
    raise ValueError("Unknown model name: {!r}; expected one of "
                     "'palnet', 'ddrnet', 'aicnet', 'grfnet'".format(modelname))
__all__ = ["make_model"]
| 499
| 25.315789
| 43
|
py
|
SSC
|
SSC-master/models/DDRNet.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
DDRNet
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from .projection_layer import Project2Dto3D
from .DDR import DDR_ASPP3d
from .DDR import BottleneckDDR2d, BottleneckDDR3d, DownsampleBlock3d
# DDRNet
# ----------------------------------------------------------------------
class SSC_RGBD_DDRNet(nn.Module):
    """Two-stream (RGB + depth) semantic scene completion network built
    from DDR (Dimensional Decomposition Residual) blocks.

    forward() takes a depth map, an RGB image and projection indices `p`,
    and returns per-voxel class scores at 1/4 resolution.
    """

    def __init__(self, num_classes=12):
        super(SSC_RGBD_DDRNet, self).__init__()
        print('SSC_RGBD_DDRNet: RGB and Depth streams with DDR blocks for Semantic Scene Completion')
        w, h, d = 240, 144, 240
        # --- depth stream: 1-channel input, 2D DDR bottlenecks.
        c_in, c, c_out, dilation, residual = 1, 4, 8, 1, True
        self.dep_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # 1x1 channel reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_dep = Project2Dto3D(w, h, d)  # w=240, h=144, d=240
        self.dep_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=4, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # --- RGB stream: 3-channel input, same structure as depth.
        c_in, c, c_out, dilation, residual = 3, 4, 8, 1, True
        self.rgb_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # 1x1 channel reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_rgb = Project2Dto3D(w, h, d)  # w=240, h=144, d=240
        self.rgb_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=4, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # ------------- 1/4-resolution residual stages, dilation 2/3/5 per stream.
        # ck = 256
        # self.ds = DownsamplerBlock_3d(64, ck)
        ck = 64
        c = 16
        # c_in, c, c_out, kernel=3, stride=1, dilation=1, residual=True
        self.res3d_1d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=2, residual=True)
        self.res3d_2d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=3, residual=True)
        self.res3d_3d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=5, residual=True)
        self.res3d_1r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=2, residual=True)
        self.res3d_2r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=3, residual=True)
        self.res3d_3r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=5, residual=True)
        # ASPP over the 4-level concatenation (4 * 64 = 256 input channels).
        self.aspp = DDR_ASPP3d(c_in=int(ck * 4), c=16, c_out=64)
        # self.aspp = DDR_ASPP3d(c_in=int(ck * 4), c=64, c_out=int(ck * 4))
        # 64 * 5 = 320 -- assumed ASPP output width; TODO confirm against DDR_ASPP3d.
        self.conv_out = nn.Sequential(
            nn.Conv3d(320, 128, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, 128, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, num_classes, 1, 1, 0)
        )
        # ---- weights init
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.xavier_uniform_(m.weight.data)  # gain=1
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, mean=0, std=0.1)

    def forward(self, x_depth=None, x_rgb=None, p=None):
        """Return per-voxel class scores; p holds the 2D->3D projection indices."""
        # input: x (BS, 3L, 240L, 144L, 240L)
        # print('SSC: x.shape', x.shape)
        # Lift each modality to the 3D grid: 2D features -> projection -> 3D features.
        x0_rgb = self.rgb_feature2d(x_rgb)
        x0_rgb = self.project_layer_rgb(x0_rgb, p)
        x0_rgb = self.rgb_feature3d(x0_rgb)
        x0_depth = self.dep_feature2d(x_depth)
        x0_depth = self.project_layer_dep(x0_depth, p)
        x0_depth = self.dep_feature3d(x0_depth)
        # Fuse by addition after each residual stage.
        f0 = torch.add(x0_depth, x0_rgb)
        x_4_d = self.res3d_1d(x0_depth)
        x_4_r = self.res3d_1r(x0_rgb)
        f1 = torch.add(x_4_d, x_4_r)
        x_5_d = self.res3d_2d(x_4_d)
        x_5_r = self.res3d_2r(x_4_r)
        f2 = torch.add(x_5_d, x_5_r)
        x_6_d = self.res3d_3d(x_5_d)
        x_6_r = self.res3d_3r(x_5_r)
        f3 = torch.add(x_6_d, x_6_r)
        x = torch.cat((f0, f1, f2, f3), dim=1)  # channels concatenate
        # print('SSC: channels concatenate x', x.size())  # (BS, 256L, 60L, 36L, 60L)
        x = self.aspp(x)
        y = self.conv_out(x)  # (BS, 12L, 60L, 36L, 60L)
        return y
| 4,722
| 36.784
| 101
|
py
|
SSC
|
SSC-master/voxel_utils/setup.py
|
"""Build script for the VoxelUtils C extension.

Builds a CUDA-linked extension when CUDA_PATH points at a valid install,
otherwise falls back to a CPU-only build.
"""
import os
from distutils.core import setup, Extension
import numpy as np

# Force g++ so the extension links against the C++ runtime.
os.environ["CC"] = "g++"
os.environ["CXX"] = "g++"

if 'CUDA_PATH' in os.environ:
    CUDA_PATH = os.environ['CUDA_PATH']
else:
    print("Could not find CUDA_PATH in environment variables. Defaulting to /usr/local/cuda!")
    CUDA_PATH = "/usr/local/cuda"

if not os.path.isdir(CUDA_PATH):
    # BUG FIX: the original message left the '{}' placeholder unfilled.
    print("CUDA_PATH {} not found. Switching to CPU!".format(CUDA_PATH))
    setup(name = 'VoxelUtils', version = '1.0', \
          ext_modules = [
              Extension('VoxelUtils', ['voxel_util_module.c'],
                        include_dirs=[np.get_include()],
                        libraries=["voxelutil"],
                        extra_link_args = ["-fopenmp"],
                        library_dirs = ["."]
                        )])
else:
    setup(name = 'VoxelUtils', version = '1.0', \
          ext_modules = [
              Extension('VoxelUtils', ['voxel_util_module.c'],
                        include_dirs=[np.get_include(), os.path.join(CUDA_PATH, "include")],
                        libraries=["voxelutil", "cudart"],
                        library_dirs = [".", os.path.join(CUDA_PATH, "lib64")]
                        )])
| 1,031
| 31.25
| 93
|
py
|
SSC
|
SSC-master/voxel_utils/scripts/configure.py
|
from distutils import sysconfig

# Emit the link command used to build shared modules, with the compiler
# swapped from gcc to g++ (the voxel extension is compiled with g++).
ldshared_cmd = sysconfig.get_config_var('LDSHARED')
print(ldshared_cmd.replace("gcc", "g++"))
| 98
| 32
| 65
|
py
|
SSC
|
SSC-master/dataloaders/dataloader.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Class of pytorch data loader
---
Jie Li
jieli_cn@163.com
Nanjing University of Science and Technology
Aug 10, 2019
"""
import glob
import imageio
import numpy as np
import numpy.matlib
import torch.utils.data
from pathlib import Path
from torchvision import transforms
from config import colorMap
# C_NUM = 12 # number of classes
# 'empty','ceiling','floor','wall','window','chair','bed','sofa','table','tvs','furn','objs'
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
seg_class_map = [0, 1, 2, 3, 4, 11, 5, 6, 7, 8, 8, 10, 10, 10, 11, 11, 9, 8, 11, 11, 11,
11, 11, 11, 11, 11, 11, 10, 10, 11, 8, 10, 11, 9, 11, 11, 11] # 0 - 11
# 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
class NYUDataset(torch.utils.data.Dataset):
    """NYU RGB-D dataset for semantic scene completion.

    Loads repackaged '.npz' samples (the DDRNet distribution) or, via the
    dead branch in __getitem__, the original SSCNet files (16-bit depth
    PNG, RGB PNG, RLE-compressed '.bin' labels, '.npz' TSDF).
    """

    def __init__(self, root, istest=False):
        # root: directory (or list of directories) containing the samples.
        # istest: when True, __getitem__ additionally returns the
        # non-empty voxel mask used for evaluation.
        self.param = {'voxel_size': (240, 144, 240),
                      'voxel_unit': 0.02,  # 0.02m, length of each grid == 20mm
                      'cam_k': [[518.8579, 0, 320],  # K is [fx 0 cx; 0 fy cy; 0 0 1];
                                [0, 518.8579, 240],  # cx = K(1,3); cy = K(2,3);
                                [0, 0, 1]],          # fx = K(1,1); fy = K(2,2);
                      }
        #
        self.subfix = 'npz'
        self.istest = istest
        self.downsample = 4  # int, downsample = 4, in labeled data, get 1 voxel from each 4
        self.filepaths = self.get_filelist(root, self.subfix)
        # Converts a PIL Image or numpy.ndarray (H x W x C) in the range [0, 255] \
        # to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
        self.transforms_rgb = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        print('Dataset:{} files'.format(len(self.filepaths)))
        correct = False  # one-off label-fixing pass; disabled by default
        if self.subfix == 'npz' and correct:
            self._correct_labels()

    def _correct_labels(self):
        """One-off maintenance pass: recompute the projection indices for every
        '.npz' sample and save the result into a sibling '<dir>_fixed' folder.

        NOTE(review): reads from hard-coded /media/scratch1/... paths --
        only usable on the machine it was written for.
        """
        print ("Correcting labels with projection indices!")
        N = len(self.filepaths)
        for index in range(N):
            print ("Correcting {}/{} label..".format(index+1, N))
            #name = self.filepaths[index][48:-11]
            filepath = Path(self.filepaths[index])
            target_file = filepath.parent.with_name(filepath.parent.stem + "_fixed") / filepath.stem
            target_file.parent.mkdir(parents=True, exist_ok=True)
            if target_file.with_suffix('.npz').exists():
                print("{} exists! skipping...".format(target_file.with_suffix('.npz')))
                continue
            with np.load(self.filepaths[index]) as npz_file:
                # print(npz_file.files)
                rgb_tensor = npz_file['rgb']
                depth_tensor = npz_file['depth']
                tsdf_hr = npz_file['tsdf_hr']  # flipped TSDF, (240, 144, 240, 1)
                # target_hr = npz_file['target_hr']
                tsdf_lr = npz_file['tsdf_lr']
                target_lr = npz_file['target_lr']
                position = npz_file['position']
                vox_origin, cam_pose, _ = self._read_rle('/media/scratch1/mcheem/datasets/depthbin/{}/{}.bin'.format("NYUtest" if self.istest else "NYUtrain", filepath.stem[:-7]))
                depth = self._read_depth('/media/scratch1/mcheem/datasets/depthbin/{}/{}.png'.format("NYUtest" if self.istest else "NYUtrain", filepath.stem[:-7]))
                _, _, position2, position4 = self._depth2voxel(depth, cam_pose, vox_origin, self.param)
                np.savez_compressed(target_file, rgb=rgb_tensor, depth=depth_tensor, tsdf_hr=tsdf_hr, target_lr=target_lr, position=position2, tsdf_lr=tsdf_lr)

    def __getitem__(self, index):
        """Return one sample.

        With self.subfix == 'npz' (always true as written):
          test mode:  (rgb, depth, tsdf_hr, target_lr.T, nonempty.T, position, name)
          train mode: (rgb, depth, tsdf_hr, target_lr.T, position, name)
        """
        _name = self.filepaths[index][:-4]
        # print(_name)
        # ---------------------------------------------------------------------------
        # Processing repackaged data provided by DDRNet
        # ---------------------------------------------------------------------------
        if self.subfix == 'npz':
            with np.load(self.filepaths[index]) as npz_file:
                # print(npz_file.files)
                rgb_tensor = npz_file['rgb']
                depth_tensor = npz_file['depth']
                tsdf_hr = npz_file['tsdf_hr']  # flipped TSDF, (240, 144, 240, 1)
                # target_hr = npz_file['target_hr']
                target_lr = npz_file['target_lr']
                position = npz_file['position']
                if self.istest:
                    tsdf_lr = npz_file['tsdf_lr']  # ( 60, 36, 60)
                    # nonempty = self.get_nonempty(tsdf, 'TSDF')
                    nonempty = self.get_nonempty2(tsdf_lr, target_lr, 'TSDF')  # this better matches the SUNCG convention
                    return rgb_tensor, depth_tensor, tsdf_hr, target_lr.T, nonempty.T, position, _name + '.png'
                return rgb_tensor, depth_tensor, tsdf_hr, target_lr.T, position, _name + '.png'
        # else:
        #
        # ---------------------------------------------------------------------------
        # Processing data provided by SSCNet (unreachable while subfix == 'npz')
        # ---------------------------------------------------------------------------
        # --- read depth, shape: (h, w)
        depth = self._read_depth(_name + '.png')  #
        depth_tensor = depth.reshape((1,) + depth.shape)
        # --- read rgb image, shape: (h, w, 3)
        # rgb = self._read_rgb(_name + '.jpg')  #
        rgb = self._read_rgb(_name[:-4] + 'rgb.png')
        rgb_tensor = self.transforms_rgb(rgb)  # channel first, shape: (3, h, w)
        # --- read ground truth
        vox_origin, cam_pose, rle = self._read_rle(_name + '.bin')
        target_hr = self._rle2voxel(rle, self.param['voxel_size'], _name + '.bin')
        target_lr = self._downsample_label(target_hr, self.param['voxel_size'], self.downsample)
        binary_vox, _, position, position4 = self._depth2voxel(depth, cam_pose, vox_origin, self.param)
        npz_file = np.load(_name + '.npz')
        tsdf_hr = npz_file['tsdf']  # SUNCG (W, H, D)
        if self.istest:
            tsdf_lr = self._downsample_tsdf(tsdf_hr, self.downsample)
            # nonempty = self.get_nonempty(tsdf, 'TSDF')
            nonempty = self.get_nonempty2(tsdf_lr, target_lr, 'TSDF')  # this better matches the SUNCG convention
            return rgb_tensor, depth_tensor, tsdf_hr, target_lr.T, nonempty.T, position, _name + '.png'
        return rgb_tensor, depth_tensor, tsdf_hr, target_lr.T, position, _name + '.png'

    def __len__(self):
        return len(self.filepaths)

    def get_filelist(self, root, subfix):
        """Collect and sort all '*.subfix' files under root (a path or a
        list of paths). Raises if root is None or no files are found."""
        if root is None:
            raise Exception("Oops! 'root' is None, please set the right file path.")
        _filepaths = list()
        if isinstance(root, list):  # merge files from multiple roots
            for root_i in root:
                fp = glob.glob(root_i + '/*.' + subfix)
                fp.sort()
                _filepaths.extend(fp)
        elif isinstance(root, str):
            _filepaths = glob.glob(root + '/*.' + subfix)  # List all files in data folder
            _filepaths.sort()
        if len(_filepaths) == 0:
            raise Exception("Oops! That was no valid data in '{}'.".format(root))
        return _filepaths

    @staticmethod
    def _read_depth(depth_filename):
        r"""Read a depth image with size H x W
        and save the depth values (in millimeters) into a 2d numpy array.
        The depth image file is assumed to be in 16-bit PNG format, depth in millimeters.
        """
        # depth = misc.imread(depth_filename) / 8000.0  # numpy.float64
        depth = imageio.imread(depth_filename) / 8000.0  # numpy.float64
        # assert depth.shape == (img_h, img_w), 'incorrect default size'
        depth = np.asarray(depth)
        return depth

    @staticmethod
    def _read_rgb(rgb_filename):  # 0.01s
        r"""Read a RGB image with size H x W
        """
        # rgb = misc.imread(rgb_filename)  # <type 'numpy.ndarray'>, numpy.uint8, (480, 640, 3)
        rgb = imageio.imread(rgb_filename)  # <type 'numpy.ndarray'>, numpy.uint8, (480, 640, 3)
        # rgb = np.rollaxis(rgb, 2, 0)  # (H, W, 3)-->(3, H, W)
        return rgb

    @staticmethod
    def _read_rle(rle_filename):  # 0.0005s
        r"""Read RLE compression data
        Return:
            vox_origin,
            cam_pose,
            vox_rle, voxel label data from file
        Shape:
            vox_rle, (240, 144, 240)
        """
        fid = open(rle_filename, 'rb')
        vox_origin = np.fromfile(fid, np.float32, 3).T  # Read voxel origin in world coordinates
        cam_pose = np.fromfile(fid, np.float32, 16).reshape((4, 4))  # Read camera pose
        vox_rle = np.fromfile(fid, np.uint32).reshape((-1, 1)).T  # Read voxel label data from file
        vox_rle = np.squeeze(vox_rle)  # 2d array: (1 x N), to 1d array: (N , )
        fid.close()
        return vox_origin, cam_pose, vox_rle

    # this version takes 0.9s
    @classmethod
    def _rle2voxel(cls, rle, voxel_size=(240, 144, 240), rle_filename=''):
        r"""Read voxel label data from file (RLE compression), and convert it to fully occupancy labeled voxels.
        In the data loader of pytorch, only single thread is allowed.
        For multi-threads version and more details, see 'readRLE.py'.
        output: seg_label: 3D numpy array, size 240 x 144 x 240
        """
        # ---- Read RLE
        # vox_origin, cam_pose, rle = cls._read_rle(rle_filename)
        # ---- Uncompress RLE, 0.9s
        seg_label = np.zeros(voxel_size[0] * voxel_size[1] * voxel_size[2], dtype=np.uint8)  # segmentation label
        vox_idx = 0
        for idx in range(int(rle.shape[0] / 2)):
            check_val = rle[idx * 2]       # RLE pairs: (value, run-length)
            check_iter = rle[idx * 2 + 1]
            if check_val >= 37 and check_val != 255:  # 37 classes to 12 classes
                print('RLE {} check_val: {}'.format(rle_filename, check_val))
            # seg_label_val = 1 if check_val < 37 else 0  # 37 classes to 2 classes: empty or occupancy
            # seg_label_val = 255 if check_val == 255 else seg_class_map[check_val]
            seg_label_val = seg_class_map[check_val] if check_val != 255 else 255  # 37 classes to 12 classes
            seg_label[vox_idx: vox_idx + check_iter] = np.matlib.repmat(seg_label_val, 1, check_iter)
            vox_idx = vox_idx + check_iter
        seg_label = seg_label.reshape(voxel_size)  # 3D array, size 240 x 144 x 240
        return seg_label

    # this version takes 3s
    @classmethod  # method 2, new
    def _depth2voxel(cls, depth, cam_pose, vox_origin, param):
        """Back-project a depth map into the voxel grid.

        Returns (voxel_binary, voxel_xyz, position, position4): occupancy
        grid, per-voxel grid coordinates, and flat voxel indices per pixel
        at full and 1/4 resolution.
        """
        cam_k = param['cam_k']
        voxel_size = param['voxel_size']  # (240, 144, 240)
        unit = param['voxel_unit']  # 0.02
        # ---- Get point in camera coordinate
        H, W = depth.shape
        gx, gy = np.meshgrid(range(W), range(H))
        pt_cam = np.zeros((H, W, 3), dtype=np.float32)
        pt_cam[:, :, 0] = (gx - cam_k[0][2]) * depth / cam_k[0][0]  # x
        pt_cam[:, :, 1] = (gy - cam_k[1][2]) * depth / cam_k[1][1]  # y
        pt_cam[:, :, 2] = depth  # z, in meter
        # ---- Get point in world coordinate
        p = cam_pose
        pt_world = np.zeros((H, W, 3), dtype=np.float32)
        pt_world[:, :, 0] = p[0][0] * pt_cam[:, :, 0] + p[0][1] * pt_cam[:, :, 1] + p[0][2] * pt_cam[:, :, 2] + p[0][3]
        pt_world[:, :, 1] = p[1][0] * pt_cam[:, :, 0] + p[1][1] * pt_cam[:, :, 1] + p[1][2] * pt_cam[:, :, 2] + p[1][3]
        pt_world[:, :, 2] = p[2][0] * pt_cam[:, :, 0] + p[2][1] * pt_cam[:, :, 1] + p[2][2] * pt_cam[:, :, 2] + p[2][3]
        pt_world[:, :, 0] = pt_world[:, :, 0] - vox_origin[0]
        pt_world[:, :, 1] = pt_world[:, :, 1] - vox_origin[1]
        pt_world[:, :, 2] = pt_world[:, :, 2] - vox_origin[2]
        # ---- Align the coordinates with labeled data (RLE .bin file)
        pt_world2 = np.zeros(pt_world.shape, dtype=np.float32)  # (h, w, 3)
        # pt_world2 = pt_world
        pt_world2[:, :, 0] = pt_world[:, :, 0]  # x, horizontal
        pt_world2[:, :, 1] = pt_world[:, :, 2]  # y, height
        pt_world2[:, :, 2] = pt_world[:, :, 1]  # z, depth
        # pt_world2[:, :, 0] = pt_world[:, :, 1]  # x, original paper's mapping
        # pt_world2[:, :, 1] = pt_world[:, :, 2]  # y
        # pt_world2[:, :, 2] = pt_world[:, :, 0]  # z
        # ---- World coordinate to grid/voxel coordinate
        point_grid = pt_world2 / unit  # Get point in grid coordinate, each grid is a voxel
        point_grid = np.rint(point_grid).astype(np.int32)  # .reshape((-1, 3))  # (H*W, 3) (H, W, 3)
        # ---- crop depth to grid/voxel
        # binary encoding '01': 0 for empty, 1 for occupancy
        # voxel_binary = np.zeros(voxel_size, dtype=np.uint8)  # (W, H, D)
        voxel_binary = np.zeros([_ + 1 for _ in voxel_size], dtype=np.float32)  # (W, H, D), one extra cell per axis
        voxel_xyz = np.zeros(voxel_size + (3,), dtype=np.float32)  # (W, H, D, 3)
        position = np.zeros((H, W), dtype=np.int32)
        position4 = np.zeros((H, W), dtype=np.int32)
        # position44 = np.zeros((H/4, W/4), dtype=np.int32)
        voxel_size_lr = (voxel_size[0] // 4, voxel_size[1] // 4, voxel_size[2] // 4)
        for h in range(H):
            for w in range(W):
                i_x, i_y, i_z = point_grid[h, w, :]
                if 0 <= i_x < voxel_size[0] and 0 <= i_y < voxel_size[1] and 0 <= i_z < voxel_size[2]:
                    voxel_binary[i_x, i_y, i_z] = 1  # the bin has at least one point (bin is not empty)
                    voxel_xyz[i_x, i_y, i_z, :] = point_grid[h, w, :]
                    # position[h, w, :] = point_grid[h, w, :]
                    # record the voxel index for each image pixel
                    position[h, w] = np.ravel_multi_index(point_grid[h, w, :], voxel_size)
                    # TODO: this projection scheme could be improved
                    # NOTE(review): '[h, ]' assigns the whole row; probably
                    # intended as position4[h, w] -- confirm before fixing.
                    position4[h, ] = np.ravel_multi_index((point_grid[h, w, :] / 4).astype(np.int32), voxel_size_lr)
                    # position44[h / 4, w / 4] = np.ravel_multi_index(point_grid[h, w, :] / 4, voxel_size_lr)
        # output --- 3D Tensor, 240 x 144 x 240
        del depth, gx, gy, pt_cam, pt_world, pt_world2, point_grid  # Release Memory
        return voxel_binary, voxel_xyz, position, position4  # (W, H, D), (W, H, D, 3)

    # this version takes about 0.6s on CPU
    @staticmethod
    def _downsample_label(label, voxel_size=(240, 144, 240), downscale=4):
        r"""downsample the labeled data,
        Shape:
            label, (240, 144, 240)
            label_downscale, if downsample==4, then (60, 36, 60)
        """
        if downscale == 1:
            return label
        ds = downscale
        small_size = (voxel_size[0] // ds, voxel_size[1] // ds, voxel_size[2] // ds)  # small size
        label_downscale = np.zeros(small_size, dtype=np.uint8)
        empty_t = 0.95 * ds * ds * ds  # threshold: cell is "empty" if >95% of its voxels are 0/255
        s01 = small_size[0] * small_size[1]
        label_i = np.zeros((ds, ds, ds), dtype=np.int32)
        for i in range(small_size[0]*small_size[1]*small_size[2]):
            z = int(i / s01)
            y = int((i - z * s01) / small_size[0])
            x = int(i - z * s01 - y * small_size[0])
            # z, y, x = np.unravel_index(i, small_size)  # turned out slower
            # print(x, y, z)
            label_i[:, :, :] = label[x * ds:(x + 1) * ds, y * ds:(y + 1) * ds, z * ds:(z + 1) * ds]
            label_bin = label_i.flatten()  # flatten returns a copy (new memory)
            # label_bin = label_i.ravel()  # ravel returns a view of the array
            # zero_count_0 = np.sum(label_bin == 0)
            # zero_count_255 = np.sum(label_bin == 255)
            zero_count_0 = np.array(np.where(label_bin == 0)).size  # faster than np.sum
            zero_count_255 = np.array(np.where(label_bin == 255)).size
            zero_count = zero_count_0 + zero_count_255
            if zero_count > empty_t:
                label_downscale[x, y, z] = 0 if zero_count_0 > zero_count_255 else 255
            else:
                # label_i_s = label_bin[np.nonzero(label_bin)]  # get the none empty class labels
                label_i_s = label_bin[np.where(np.logical_and(label_bin > 0, label_bin < 255))]
                label_downscale[x, y, z] = np.argmax(np.bincount(label_i_s))  # majority vote
        return label_downscale

    @staticmethod
    def _downsample_tsdf(tsdf, downscale=4):  # only used when computing the non-empty mask
        r"""
        Shape:
            tsdf, (240, 144, 240)
            tsdf_downscale, (60, 36, 60), (stsdf.shape[0]/4, stsdf.shape[1]/4, stsdf.shape[2]/4)
        """
        if downscale == 1:
            return tsdf
        # TSDF_EMPTY = np.float32(0.001)
        # TSDF_SURFACE: 1, sign >= 0
        # TSDF_OCCLUD: sign < 0  np.float32(-0.001)
        ds = downscale
        small_size = (int(tsdf.shape[0] / ds), int(tsdf.shape[1] / ds), int(tsdf.shape[2] / ds))
        tsdf_downscale = np.ones(small_size, dtype=np.float32) * np.float32(0.001)  # init 0.001 for empty
        s01 = small_size[0] * small_size[1]
        tsdf_sr = np.ones((ds, ds, ds), dtype=np.float32)  # search region
        for i in range(small_size[0] * small_size[1] * small_size[2]):
            z = int(i / s01)
            y = int((i - z * s01) / small_size[0])
            x = int(i - z * s01 - y * small_size[0])
            tsdf_sr[:, :, :] = tsdf[x * ds:(x + 1) * ds, y * ds:(y + 1) * ds, z * ds:(z + 1) * ds]
            tsdf_bin = tsdf_sr.flatten()
            # none_empty_count = np.array(np.where(tsdf_bin != TSDF_EMPTY)).size
            none_empty_count = np.array(np.where(np.logical_or(tsdf_bin <= 0, tsdf_bin == 1))).size
            if none_empty_count > 0:
                # surface_count  = np.array(np.where(stsdf_bin == 1)).size
                # occluded_count = np.array(np.where(stsdf_bin == -2)).size
                # surface_count  = np.array(np.where(tsdf_bin > 0)).size  # problematic
                surface_count = np.array(np.where(tsdf_bin == 1)).size
                # occluded_count = np.array(np.where(tsdf_bin < 0)).size
                # tsdf_downscale[x, y, z] = 0 if surface_count > occluded_count else np.float32(-0.001)
                tsdf_downscale[x, y, z] = 1 if surface_count > 2 else np.float32(-0.001)  # 1 or 0 ?
            # else:
            #     tsdf_downscale[x, y, z] = empty  # TODO: should not set all remaining values to 0.001
        return tsdf_downscale

    @staticmethod
    def get_nonempty(voxels, encoding):  # Get none empty from depth voxels
        """Mask of voxels inside the bounding box of the observed surface."""
        data = np.zeros(voxels.shape, dtype=np.float32)  # init 0 for empty
        # if encoding == 'STSDF':  # surface, empty, occulted: 1, 0, -1
        #     data[voxels == 1] = 1
        #     return data
        if encoding == 'STSDF':  # surface, empty, occulted: 1, 0, -1
            data[voxels != 0] = 1
            surface = np.array(np.where(voxels == 1))  # surface=1
        elif encoding == 'TSDF':
            data[np.where(np.logical_or(voxels <= 0, voxels == 1))] = 1
            surface = np.array(np.where(voxels == 1))  # surface
            # surface = np.array(np.where(np.logical_and(voxels > 0, voxels != np.float32(0.001))))  # surface
        else:
            raise Exception("Encoding error: {} is not validate".format(encoding))
        min_idx = np.amin(surface, axis=1)
        max_idx = np.amax(surface, axis=1)
        # print('min_idx, max_idx', min_idx, max_idx)
        # slicing notes: data[:a] excludes index a; data[b:] includes index b
        # min_idx = min_idx
        max_idx = max_idx + 1
        # Expanding by one ring should suffice, but because the GT labels are
        # imprecise, an extra ring was considered at high resolution (disabled):
        # min_idx = min_idx - 1
        # max_idx = max_idx + 2
        min_idx[min_idx < 0] = 0
        max_idx[0] = min(voxels.shape[0], max_idx[0])
        max_idx[1] = min(voxels.shape[1], max_idx[1])
        max_idx[2] = min(voxels.shape[2], max_idx[2])
        data[:min_idx[0], :, :] = 0  # data[:a] excludes index a
        data[:, :min_idx[1], :] = 0
        data[:, :, :min_idx[2]] = 0
        data[max_idx[0]:, :, :] = 0  # data[b:] includes index b
        data[:, max_idx[1]:, :] = 0
        data[:, :, max_idx[2]:] = 0
        return data

    @staticmethod
    def get_nonempty2(voxels, target, encoding):  # Get none empty from depth voxels
        """Mask excluding unlabeled (255) targets and empty voxels."""
        data = np.ones(voxels.shape, dtype=np.float32)  # init 1 for none empty
        data[target == 255] = 0
        if encoding == 'STSDF':  # surface, empty, occulted: 1, 0, -1
            data[voxels == 0] = 0
        elif encoding == 'TSDF':
            # --0
            # data[voxels == np.float32(0.001)] = 0
            # --1
            # data[voxels > 0] = 0
            # --2
            # data[voxels >= np.float32(0.001)] = 0
            # --3
            data[voxels >= np.float32(0.001)] = 0
            data[voxels == 1] = 1
        return data

    @staticmethod
    def _get_xyz(size):
        """Index grids for a voxel volume: x horizontal, y height, z depth."""
        _x = np.zeros(size, dtype=np.int32)
        _y = np.zeros(size, dtype=np.int32)
        _z = np.zeros(size, dtype=np.int32)
        for i_h in range(size[0]):  # x, y, z
            _x[i_h, :, :] = i_h  # x, left-right flip
        for i_w in range(size[1]):
            _y[:, i_w, :] = i_w  # y, up-down flip
        for i_d in range(size[2]):
            _z[:, :, i_d] = i_d  # z, front-back flip
        return _x, _y, _z

    @classmethod
    def labeled_voxel2ply(cls, vox_labeled, ply_filename):  #
        """Save labeled voxels to disk in colored-point cloud format: x y z r g b, with '.ply' suffix
           vox_labeled.shape: (W, H, D)
        """  #
        # ---- Check data type, numpy ndarray
        if type(vox_labeled) is not np.ndarray:
            raise Exception("Oops! Type of vox_labeled should be 'numpy.ndarray', not {}.".format(type(vox_labeled)))
        # ---- Check data validation
        if np.amax(vox_labeled) == 0:
            print('Oops! All voxel is labeled empty.')
            return
        # ---- get size
        size = vox_labeled.shape
        # print('vox_labeled.shape:', vox_labeled.shape)
        # ---- Convert to list
        vox_labeled = vox_labeled.flatten()
        # ---- Get X Y Z
        _x, _y, _z = cls._get_xyz(size)
        _x = _x.flatten()
        _y = _y.flatten()
        _z = _z.flatten()
        # print('_x.shape', _x.shape)
        # ---- Get R G B
        vox_labeled[vox_labeled == 255] = 0  # empty
        # vox_labeled[vox_labeled == 255] = 12  # ignore
        _rgb = colorMap[vox_labeled[:]]
        # print('_rgb.shape:', _rgb.shape)
        # ---- Get X Y Z R G B
        xyz_rgb = zip(_x, _y, _z, _rgb[:, 0], _rgb[:, 1], _rgb[:, 2])  # python2.7
        xyz_rgb = list(xyz_rgb)  # python3
        # print('xyz_rgb.shape-1', xyz_rgb.shape)
        # xyz_rgb = zip(_z, _y, _x, _rgb[:, 0], _rgb[:, 1], _rgb[:, 2])  # swap X and Z axes for MeshLab display
        # ---- Get ply data without empty voxel
        xyz_rgb = np.array(xyz_rgb)
        # print('xyz_rgb.shape-1', xyz_rgb.shape)
        ply_data = xyz_rgb[np.where(vox_labeled > 0)]
        if len(ply_data) == 0:
            raise Exception("Oops!  That was no valid ply data.")
        ply_head = 'ply\n' \
                   'format ascii 1.0\n' \
                   'element vertex %d\n' \
                   'property float x\n' \
                   'property float y\n' \
                   'property float z\n' \
                   'property uchar red\n' \
                   'property uchar green\n' \
                   'property uchar blue\n' \
                   'end_header' % len(ply_data)
        # ---- Save ply data to disk
        np.savetxt(ply_filename, ply_data, fmt="%d %d %d %d %d %d", header=ply_head, comments='')  # It takes 20s
        del vox_labeled, _x, _y, _z, _rgb, xyz_rgb, ply_data, ply_head
        # print('Saved-->{}'.format(ply_filename))
if __name__ == '__main__':
    # ---- Smoke-test the data loader on a hard-coded sample directory.
    data_dir = '/home/amax/jie/Data_zoo/NYU_SSC/NYUCADval40'
    # ------------------------------------------------
    data_loader = torch.utils.data.DataLoader(
        dataset=NYUDataset(data_dir),
        batch_size=1,
        shuffle=False,
        num_workers=1
    )
    # BUG FIX: in train mode NYUDataset.__getitem__ returns SIX items
    # (rgb, depth, tsdf_hr, target_lr, position, filename); the original
    # loop unpacked only five, raising ValueError on the first batch.
    for step, (rgb_tensor, depth, tsdf_hr, target_lr, position, _filename) in enumerate(data_loader):
        print('step:', step, _filename)
| 24,291
| 44.920605
| 180
|
py
|
SSC
|
SSC-master/dataloaders/__init__.py
|
from .dataloader import NYUDataset
from config import Path
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
    """Build the train/val ``DataLoader`` pair for the dataset named in ``args``.

    Args:
        args: namespace providing ``dataset`` (name understood by
            ``Path.db_root_dir``), ``batch_size`` and ``workers``.
        **kwargs: accepted for interface compatibility; currently unused.

    Returns:
        tuple ``(train_loader, val_loader)``.

    Raises:
        ValueError: if ``args.dataset`` is empty/None.  The original code fell
            through to an undefined-name ``NameError`` in that case; an
            explicit, descriptive error is raised instead.
    """
    if not args.dataset:
        raise ValueError('args.dataset must name a dataset, got {!r}'.format(args.dataset))
    base_dirs = Path.db_root_dir(args.dataset)
    print('Training data:{}'.format(base_dirs['train']))
    train_loader = DataLoader(
        dataset=NYUDataset(base_dirs['train'], istest=False),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers
    )
    print('Validate data:{}'.format(base_dirs['val']))
    val_loader = DataLoader(
        dataset=NYUDataset(base_dirs['val'], istest=True),
        batch_size=args.batch_size,  # 1 * torch.cuda.device_count(), 1 for each GPU
        shuffle=False,
        num_workers=args.workers  # 1 * torch.cuda.device_count()
    )
    return train_loader, val_loader
| 883
| 28.466667
| 88
|
py
|
SSC
|
SSC-master/utils/seed.py
|
import numpy as np
import scipy.misc
import os
import random
import torch
def seed_torch(seed=3055):
    """Seed every RNG the project relies on (hash, python, numpy, torch CPU+CUDA).

    Also pins cuDNN to deterministic kernels and disables its autotuner so
    repeated runs of the same script produce identical results.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers every visible GPU
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
| 416
| 18.857143
| 66
|
py
|
SSC
|
SSC-master/utils/utils.py
|
import numpy as np
import config
import VoxelUtils as vu
def compute_tsdf(depth_data, vox_origin, cam_k, cam_pose0, voxel_size=(240, 144, 240)):
    """Compute a TSDF volume from a depth map via the VoxelUtils CUDA/C++ module.

    Args:
        depth_data: (H, W) depth image.
        vox_origin: world-space origin of the voxel grid (3 values).
        cam_k: 3x3 camera intrinsics.
        cam_pose0: 4x4 camera pose.
        voxel_size: grid resolution, default (240, 144, 240).

    Returns:
        tuple of (TSDF volume reshaped to ``voxel_size``,
                  per-pixel voxel-index map of shape (H, W),
                  int64 occupancy volume scaled by label value 11).
    """
    height, width = depth_data.shape
    n_vox = voxel_size[0] * voxel_size[1] * voxel_size[2]

    # Output buffers the extension fills in place.
    vox_tsdf = np.ones(n_vox, dtype=np.float64)
    depth_mapping_idxs = np.zeros(width * height, dtype=np.float64)
    voxel_occupancy = np.zeros(n_vox, dtype=np.float64)

    # Flat camera-info vector expected by the extension: [W, H, K(9), pose(16)].
    cam_info = np.zeros(27, dtype=np.float64)
    cam_info[0] = width
    cam_info[1] = height
    cam_info[2:11] = np.asarray(cam_k, dtype=np.float64).reshape(-1)
    cam_info[11:27] = np.asarray(cam_pose0, dtype=np.float64).reshape(-1)

    # Flat voxel-info vector: [unit, margin, dims(3), origin(3)].
    vox_info = np.zeros(8, dtype=np.float64)
    vox_info[0] = 0.02  # voxel edge length (meters)
    vox_info[1] = 0.04  # TSDF truncation margin (two voxels)
    vox_info[2:5] = voxel_size
    vox_info[5:8] = vox_origin

    vu.compute_tsdf(cam_info, vox_info,
                    depth_data.astype(np.float64).reshape(-1),
                    vox_tsdf, depth_mapping_idxs, voxel_occupancy)

    return (vox_tsdf.reshape(voxel_size),
            depth_mapping_idxs.reshape(height, width),
            voxel_occupancy.reshape(voxel_size).astype(np.int64) * 11)
def get_origin_from_depth_image(depth, cam_k, cam_pose):
    """Back-project a depth map and return the minimum world coordinate.

    Args:
        depth: (H, W) depth in meters.
        cam_k: 3x3 intrinsics (list-of-lists or array).
        cam_pose: 4x4 camera-to-world transform.

    Returns:
        (3,) array: per-axis minimum of the world-space point cloud, used as
        the voxel-grid origin.
    """
    H, W = depth.shape
    gx, gy = np.meshgrid(range(W), range(H))

    # Pixel -> camera coordinates via the pinhole model.
    pt_cam = np.zeros((H, W, 3), dtype=np.float32)
    pt_cam[:, :, 0] = (gx - cam_k[0][2]) * depth / cam_k[0][0]
    pt_cam[:, :, 1] = (gy - cam_k[1][2]) * depth / cam_k[1][1]
    pt_cam[:, :, 2] = depth

    # Camera -> world: apply the 3x4 upper part of the pose, one axis at a time.
    p = cam_pose
    pt_world = np.zeros((H, W, 3), dtype=np.float32)
    for axis in range(3):
        pt_world[:, :, axis] = (p[axis][0] * pt_cam[:, :, 0]
                                + p[axis][1] * pt_cam[:, :, 1]
                                + p[axis][2] * pt_cam[:, :, 2]
                                + p[axis][3])
    return pt_world.min(axis=(0, 1))
def _get_xyz(size):
"""x width yheight zdepth"""
_x = np.zeros(size, dtype=np.int32)
_y = np.zeros(size, dtype=np.int32)
_z = np.zeros(size, dtype=np.int32)
for i_h in range(size[0]): # x, y, z
_x[i_h, :, :] = i_h # x, left-right flip
for i_w in range(size[1]):
_y[:, i_w, :] = i_w # y, up-down flip
for i_d in range(size[2]):
_z[:, :, i_d] = i_d # z, front-back flip
return _x, _y, _z
def labeled_voxel2ply(vox_labeled, ply_filename):
    """Save labeled voxels to disk as a colored point cloud: "x y z r g b" per vertex, '.ply' suffix.

    vox_labeled.shape: (W, H, D); entries are class labels (0 = empty, 255
    folded into empty below).  Colors come from ``config.colorMap`` indexed
    by label.  Empty voxels are dropped from the output.
    """
    # ---- Check data type, numpy ndarray
    if type(vox_labeled) is not np.ndarray:
        raise Exception("Oops! Type of vox_labeled should be 'numpy.ndarray', not {}.".format(type(vox_labeled)))
    # ---- Check data validation: nothing to write if every voxel is empty
    if np.amax(vox_labeled) == 0:
        print('Oops! All voxel is labeled empty.')
        return
    # ---- get size before flattening (needed to build the index grids)
    size = vox_labeled.shape
    # ---- Convert to a flat 1-D label vector (flatten copies, so the
    # caller's array is not modified by the 255->0 rewrite below)
    vox_labeled = vox_labeled.flatten()
    # ---- Get X Y Z index grids matching the original (W, H, D) layout
    _x, _y, _z = _get_xyz(size)
    _x = _x.flatten()
    _y = _y.flatten()
    _z = _z.flatten()
    # ---- Get R G B: fold the 255 label into empty before the color lookup
    vox_labeled[vox_labeled == 255] = 0  # empty
    # vox_labeled[vox_labeled == 255] = 12  # ignore
    _rgb = config.colorMap[vox_labeled[:]]
    # ---- Get X Y Z R G B rows, one per voxel
    xyz_rgb = zip(_x, _y, _z, _rgb[:, 0], _rgb[:, 1], _rgb[:, 2])  # python2.7
    xyz_rgb = list(xyz_rgb)  # python3: zip is lazy, materialize it
    # xyz_rgb = zip(_z, _y, _x, _rgb[:, 0], _rgb[:, 1], _rgb[:, 2])  # swap the X and Z axes for display in MeshLab
    # ---- Get ply data without empty voxel (label > 0 after the 255 rewrite)
    xyz_rgb = np.array(xyz_rgb)
    ply_data = xyz_rgb[np.where(vox_labeled > 0)]
    if len(ply_data) == 0:
        raise Exception("Oops! That was no valid ply data.")
    # ---- ASCII PLY header; the vertex count must equal the row count
    ply_head = 'ply\n' \
               'format ascii 1.0\n' \
               'element vertex %d\n' \
               'property float x\n' \
               'property float y\n' \
               'property float z\n' \
               'property uchar red\n' \
               'property uchar green\n' \
               'property uchar blue\n' \
               'end_header' % len(ply_data)
    # ---- Save ply data to disk (slow: roughly 20s at full resolution)
    np.savetxt(ply_filename, ply_data, fmt="%d %d %d %d %d %d", header=ply_head, comments='')
    # Drop the large intermediates eagerly to keep peak memory down.
    del vox_labeled, _x, _y, _z, _rgb, xyz_rgb, ply_data, ply_head
def downsample_voxel(label, voxel_size=(240, 144, 240), downscale=4):
    """Downsample a labeled voxel grid by majority vote.

    Each ``ds x ds x ds`` cell of ``label`` collapses to one output voxel: if
    more than 95% of the cell is empty (0) or ignore (255) the output is 0 or
    255, whichever count dominates; otherwise it is the most frequent real
    class label (0 < label < 255) in the cell.

    Shape:
        label: ``voxel_size`` (default (240, 144, 240))
        returns: ``voxel_size // downscale`` (default (60, 36, 60)), uint8
    """
    if downscale == 1:
        return label
    ds = downscale
    small = (voxel_size[0] // ds, voxel_size[1] // ds, voxel_size[2] // ds)
    label_downscale = np.zeros(small, dtype=np.uint8)
    empty_thresh = 0.95 * ds * ds * ds  # cell counts as empty above this
    for z in range(small[2]):
        for y in range(small[1]):
            for x in range(small[0]):
                cell = label[x * ds:(x + 1) * ds,
                             y * ds:(y + 1) * ds,
                             z * ds:(z + 1) * ds].flatten()
                n_empty = np.array(np.where(cell == 0)).size
                n_ignore = np.array(np.where(cell == 255)).size
                if n_empty + n_ignore > empty_thresh:
                    label_downscale[x, y, z] = 0 if n_empty > n_ignore else 255
                else:
                    real = cell[np.where(np.logical_and(cell > 0, cell < 255))]
                    label_downscale[x, y, z] = np.argmax(np.bincount(real))
    return label_downscale
def save_point_cloud(points, filename):
    """Write ``points`` (any array reshapeable to (-1, 3)) as "x;y;z" lines."""
    rows = points.reshape(-1, 3)
    lines = ["{};{};{}\n".format(px, py, pz) for px, py, pz in rows]
    with open(filename, "w") as out:
        out.write("".join(lines))
def depth2voxel(depth, cam_pose, vox_origin, cam_k, voxel_unit=0.02, voxel_size=(240, 144, 240), ):
    """Project a depth map into a binary voxel occupancy grid.

    Args:
        depth: (H, W) depth image in meters.
        cam_pose: 4x4 camera-to-world transform.
        vox_origin: NOTE(review): this argument is ignored -- the origin is
            recomputed below from the point cloud's per-axis minimum; kept
            only for interface compatibility.
        cam_k: 3x3 camera intrinsics.
        voxel_unit: voxel edge length in meters.
        voxel_size: full-resolution grid dimensions (W, H, D).

    Returns:
        voxel_binary: (W, H, D) int grid, 11 where at least one point landed.
        voxel_xyz:    (W, H, D, 3) grid coordinate stored for each hit bin.
        position:     (H, W) flat full-resolution voxel index per pixel.
        position4:    (H, W) flat quarter-resolution voxel index per pixel.
    """
    # ---- Get point in camera coordinates (pinhole back-projection)
    H, W = depth.shape
    gx, gy = np.meshgrid(range(W), range(H))
    pt_cam = np.zeros((H, W, 3), dtype=np.float32)
    pt_cam[:, :, 0] = (gx - cam_k[0][2]) * depth / cam_k[0][0]  # x
    pt_cam[:, :, 1] = (gy - cam_k[1][2]) * depth / cam_k[1][1]  # y
    pt_cam[:, :, 2] = depth  # z, in meters
    # ---- Get point in world coordinates (3x4 upper part of the pose)
    p = cam_pose
    pt_world = np.zeros((H, W, 3), dtype=np.float32)
    pt_world[:, :, 0] = p[0][0] * pt_cam[:, :, 0] + p[0][1] * pt_cam[:, :, 1] + p[0][2] * pt_cam[:, :, 2] + p[0][3]
    pt_world[:, :, 1] = p[1][0] * pt_cam[:, :, 0] + p[1][1] * pt_cam[:, :, 1] + p[1][2] * pt_cam[:, :, 2] + p[1][3]
    pt_world[:, :, 2] = p[2][0] * pt_cam[:, :, 0] + p[2][1] * pt_cam[:, :, 1] + p[2][2] * pt_cam[:, :, 2] + p[2][3]
    # Shift the cloud so its minimum corner sits at the grid origin.
    vox_origin = pt_world.min(axis=(0, 1))
    pt_world[:, :, 0] = pt_world[:, :, 0] - vox_origin[0]
    pt_world[:, :, 1] = pt_world[:, :, 1] - vox_origin[1]
    pt_world[:, :, 2] = pt_world[:, :, 2] - vox_origin[2]
    # ---- Align the coordinate axes with the labeled data (RLE .bin file)
    pt_world2 = np.zeros(pt_world.shape, dtype=np.float32)  # (h, w, 3)
    pt_world2[:, :, 0] = pt_world[:, :, 1]  # x (axis order from the original paper)
    pt_world2[:, :, 1] = pt_world[:, :, 2]  # y
    pt_world2[:, :, 2] = pt_world[:, :, 0]  # z
    # ---- World coordinates to grid/voxel coordinates
    point_grid = pt_world2 / voxel_unit  # each grid cell is one voxel
    point_grid = np.rint(point_grid).astype(np.int32)  # (H, W, 3)
    # ---- Crop depth points into the grid.
    # Occupancy encoding: 0 for empty, 11 for occupied.
    voxel_binary = np.zeros([v for v in voxel_size], dtype=int)  # (W, H, D)
    voxel_xyz = np.zeros(voxel_size + (3,), dtype=np.float32)  # (W, H, D, 3)
    position = np.zeros((H, W), dtype=np.int32)
    position4 = np.zeros((H, W), dtype=np.int32)
    voxel_size_lr = (voxel_size[0] // 4, voxel_size[1] // 4, voxel_size[2] // 4)
    for h in range(H):
        for w in range(W):
            i_x, i_y, i_z = point_grid[h, w, :]
            if 0 <= i_x < voxel_size[0] and 0 <= i_y < voxel_size[1] and 0 <= i_z < voxel_size[2]:
                voxel_binary[i_x, i_y, i_z] = 11  # the bin has at least one point
                voxel_xyz[i_x, i_y, i_z, :] = point_grid[h, w, :]
                # Record the flat voxel index each pixel projects into.
                position[h, w] = np.ravel_multi_index(point_grid[h, w, :], voxel_size)
                # BUGFIX: was ``position4[h, ]`` which broadcast the value over
                # the whole row h, leaving every column with the last pixel's
                # index; store the per-pixel quarter-resolution index instead.
                position4[h, w] = np.ravel_multi_index((point_grid[h, w, :] / 4).astype(np.int32), voxel_size_lr)
    # output --- 3D tensor, 240 x 144 x 240
    del depth, gx, gy, pt_cam, pt_world, pt_world2, point_grid  # release memory
    return voxel_binary, voxel_xyz, position, position4  # (W, H, D), (W, H, D, 3)
| 11,584
| 43.72973
| 119
|
py
|
Unilm
|
Unilm-master/conver_torch_to_tf.py
|
"""
@author: liucong
@contact: logcongcong@gmail.com
@time: 2020/7/27 13:39
"""
from convert_unilm_pytorch_checkpoint_to_original_tf import convert_pytorch_checkpoint_to_tf
from modeling_unilm import UnilmForLM
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
def f(torch_bert_dir, save_dir):
    """Load a PyTorch UnilmForLM checkpoint from ``torch_bert_dir`` and
    re-export it to ``save_dir`` as a TF checkpoint named "bert_model"."""
    model = UnilmForLM.from_pretrained(torch_bert_dir)
    convert_pytorch_checkpoint_to_tf(model, save_dir, "bert_model")
if __name__ == "__main__":
    # NOTE(review): hard-coded local paths -- adjust before running.
    torch_bert_dir = "yunwen_github/Unilm/model"
    save_dir = "yunwen_github/Unilm/model_tf"
    f(torch_bert_dir, save_dir)
| 626
| 25.125
| 92
|
py
|
Unilm
|
Unilm-master/modeling_unilm.py
|
# coding=utf-8
"""PyTorch UniLM model. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from transformers.modeling_utils import PreTrainedModel
from configuration_unilm import UnilmConfig
from transformers.modeling_bert import load_tf_weights_in_bert, BertPooler, BertIntermediate, BertOutput, BertPredictionHeadTransform, BertSelfOutput, BertLMPredictionHead, BertOnlyMLMHead, BertOnlyMLMHead, BertEmbeddings, BertOnlyNSPHead
logger = logging.getLogger(__name__)
UNILM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'unilm-base-cased': "",
'unilm-large-cased': ""
}
BertLayerNorm = torch.nn.LayerNorm
class BertSelfAttention(nn.Module):
    """Multi-head self-attention that can optionally attend over cached
    ``history_states`` from earlier decoding steps (incremental decoding)."""

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (B, T, all_head) -> (B, heads, T, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, history_states=None):
        # Queries always come from the current states; keys/values may also
        # cover the cached history when decoding incrementally.
        if history_states is None:
            kv_input = hidden_states
        else:
            kv_input = torch.cat((history_states, hidden_states), dim=1)
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(kv_input))
        v = self.transpose_for_scores(self.value(kv_input))
        # Scale the queries before the dot product, then add the additive mask.
        scores = torch.matmul(
            q / math.sqrt(self.attention_head_size), k.transpose(-1, -2))
        scores = scores + attention_mask
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class BertAttention(nn.Module):
    """Attention sub-layer: self-attention followed by the residual/output
    projection (``BertSelfOutput``)."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, history_states=None):
        context = self.self(input_tensor, attention_mask,
                            history_states=history_states)
        return self.output(context, input_tensor)
class BertLayer(nn.Module):
    """One transformer block: attention -> intermediate (FFN) -> output."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, history_states=None):
        attn_out = self.attention(hidden_states, attention_mask,
                                  history_states=history_states)
        return self.output(self.intermediate(attn_out), attn_out)
class BertEncoder(nn.Module):
    """Stack of ``BertLayer`` blocks with optional cached states for
    incremental decoding."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        # Build one layer and deep-copy it num_hidden_layers times.
        layer = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(layer)
                                    for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, prev_embedding=None, prev_encoded_layers=None):
        """Run the stack and return per-layer outputs as a list (or a
        single-element list with the last output).

        ``prev_embedding`` / ``prev_encoded_layers`` are cached activations
        from earlier decoding steps and must be given together.  Layer 0
        attends over the cached embedding output; layer i+1 attends over the
        cache of layer i (see the rotation of ``history_states`` below).
        """
        assert (prev_embedding is None) == (prev_encoded_layers is None)
        all_encoder_layers = []
        if (prev_embedding is not None) and (prev_encoded_layers is not None):
            # Incremental path: rotate the history so each layer sees the
            # previous layer's cached outputs.
            history_states = prev_embedding
            for i, layer_module in enumerate(self.layer):
                hidden_states = layer_module(
                    hidden_states, attention_mask, history_states=history_states)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
                if prev_encoded_layers is not None:
                    history_states = prev_encoded_layers[i]
        else:
            # Plain path: no cache, standard transformer forward.
            for layer_module in self.layer:
                hidden_states = layer_module(
                    hidden_states, attention_mask)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class UnilmPreTrainedModel(PreTrainedModel):
    """Abstract base wiring UniLM into the HuggingFace ``PreTrainedModel``
    machinery: config class, pretrained-archive map, TF-checkpoint loader,
    and the BERT-style weight initializer."""
    config_class = UnilmConfig
    pretrained_model_archive_map = UNILM_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "unilm"

    def _init_weights(self, module):
        """BERT-style init: normal-distributed Linear/Embedding weights,
        LayerNorm reset to identity, Linear biases zeroed."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(
                mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
class UnilmModel(UnilmPreTrainedModel):
    """UniLM backbone: token embeddings + transformer encoder + pooler."""

    def __init__(self, config):
        super(UnilmModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()

    def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
        """Broadcast a 2-D or 3-D mask to additive (B, 1, *, T) form where
        masked positions carry -10000 and visible positions carry 0."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        if attention_mask.dim() == 2:
            extended = attention_mask.unsqueeze(1).unsqueeze(2)
        elif attention_mask.dim() == 3:
            extended = attention_mask.unsqueeze(1)
        else:
            raise NotImplementedError
        # Match the model dtype for fp16 compatibility.
        extended = extended.to(dtype=next(self.parameters()).dtype)
        return (1.0 - extended) * -10000.0

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        ext_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(
            embedding_output, ext_mask,
            output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class UnilmModelIncr(UnilmModel):
    """``UnilmModel`` variant for incremental decoding: takes explicit
    position ids plus cached embeddings/encoder states, and also returns the
    embedding output so the caller can extend the cache."""

    def __init__(self, config):
        super(UnilmModelIncr, self).__init__(config)

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True, prev_embedding=None,
                prev_encoded_layers=None):
        ext_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, position_ids)
        encoded_layers = self.encoder(
            embedding_output, ext_mask,
            output_all_encoded_layers=output_all_encoded_layers,
            prev_embedding=prev_embedding,
            prev_encoded_layers=prev_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return embedding_output, encoded_layers, pooled_output
class LabelSmoothingLoss(_Loss):
    """Per-position KL-divergence against a label-smoothed target distribution.

    The target puts ``1 - label_smoothing`` mass on the gold token and spreads
    ``label_smoothing`` uniformly over the rest of the vocabulary (excluding
    the ``ignore_index`` slot).  Positions whose gold id equals
    ``ignore_index`` get an all-zero target and thus zero loss.
    """

    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)
        assert label_smoothing > 0
        assert tgt_vocab_size > 0
        # Uniform smoothing mass over vocab minus the gold and ignore slots.
        smoothing_value = label_smoothing / (tgt_vocab_size - 2)
        one_hot = torch.full((tgt_vocab_size,), smoothing_value)
        one_hot[self.ignore_index] = 0
        self.register_buffer('one_hot', one_hot.unsqueeze(0))
        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, output, target):
        """``output``: (B, T, V) log-probabilities; ``target``: (B, T) ids.
        Returns the per-position KL divergence of shape (B, T)."""
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        flat_out = output.view(-1, self.tgt_vocab_size)
        flat_tgt = target.view(-1)
        model_prob = self.one_hot.repeat(flat_tgt.size(0), 1)
        model_prob.scatter_(1, flat_tgt.unsqueeze(1), self.confidence)
        model_prob.masked_fill_((flat_tgt == self.ignore_index).unsqueeze(1), 0)
        kl = F.kl_div(flat_out, model_prob.type_as(flat_out), reduction='none')
        return kl.view(batch_size, num_pos, -1).sum(2)
class UnilmForLM(UnilmPreTrainedModel):
    """UniLM with a masked-LM head plus a next-sentence (NSP) head,
    i.e. BERT-style pre-training on top of the UniLM backbone."""

    def __init__(self, config):
        super(UnilmForLM, self).__init__(config)
        self.bert = UnilmModel(config)
        self.cls = BertOnlyMLMHead(config)
        # Per-token CE so masking/normalization can be applied afterwards.
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        if hasattr(config, 'label_smoothing') and config.label_smoothing:
            self.crit_mask_lm_smoothed = LabelSmoothingLoss(
                config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
        else:
            self.crit_mask_lm_smoothed = None
        self.num_labels = 2
        self.cls2 = BertOnlyNSPHead(config)
        self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        # Share the input embedding matrix with the LM output projection.
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, masked_pos=None, masked_weights=None, next_sentence_label=None):
        """Return raw prediction scores when ``masked_lm_labels`` is None,
        else the masked-LM loss (plus NSP loss when a label is given)."""
        sequence_output, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)

        def gather_seq_out_by_pos(seq, pos):
            # Select the hidden states at the masked positions.
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))

        def gather_seq_out_by_pos_average(seq, pos, mask):
            # NOTE(review): not called in this forward; kept for compatibility.
            batch_size, max_token_num = pos.size(0), pos.size(-1)
            pos_vec = torch.gather(seq, 1, pos.view(batch_size, -1).unsqueeze(
                2).expand(-1, -1, seq.size(-1))).view(batch_size, -1, max_token_num, seq.size(-1))
            mask = mask.type_as(pos_vec)
            pos_vec_masked_sum = (
                pos_vec * mask.unsqueeze(3).expand_as(pos_vec)).sum(2)
            return pos_vec_masked_sum / mask.sum(2, keepdim=True).expand_as(pos_vec_masked_sum)

        def loss_mask_and_normalize(loss, mask):
            # Zero out padded positions; normalize by the number of real ones.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()
        if masked_lm_labels is None:
            # Inference path: return scores (optionally only at masked_pos).
            if masked_pos is None:
                prediction_scores = self.cls(sequence_output)
            else:
                sequence_output_masked = gather_seq_out_by_pos(
                    sequence_output, masked_pos)
                prediction_scores = self.cls(sequence_output_masked)
            return prediction_scores
        sequence_output_masked = gather_seq_out_by_pos(
            sequence_output, masked_pos)
        prediction_scores_masked = self.cls(sequence_output_masked)
        if self.crit_mask_lm_smoothed:
            # The smoothed criterion expects log-probabilities.
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_lm_labels)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), masked_lm_labels)
        masked_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), masked_weights)
        seq_relationship_score = self.cls2(pooled_output)
        if next_sentence_label is None:
            total_loss = masked_lm_loss
        else:
            next_sentence_loss = self.crit_next_sent(
                seq_relationship_score.view(-1, self.num_labels).float(), next_sentence_label.view(-1))
            total_loss = next_sentence_loss + masked_lm_loss
        return total_loss
class UnilmForSeq2Seq(UnilmPreTrainedModel):
    """UniLM fine-tuned for sequence-to-sequence: masked-LM loss only
    (no next-sentence head).  Refer to BertForPreTraining."""

    def __init__(self, config):
        super(UnilmForSeq2Seq, self).__init__(config)
        self.bert = UnilmModel(config)
        self.cls = BertOnlyMLMHead(config)
        # Per-token CE so masking/normalization can be applied afterwards.
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        if hasattr(config, 'label_smoothing') and config.label_smoothing:
            self.crit_mask_lm_smoothed = LabelSmoothingLoss(
                config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
        else:
            self.crit_mask_lm_smoothed = None
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, masked_pos=None, masked_weights=None, num_tokens_a=None, num_tokens_b=None):
        """Return raw prediction scores when ``masked_lm_labels`` is None,
        else the normalized masked-LM loss.

        ``num_tokens_a`` / ``num_tokens_b`` are accepted but not used here.
        """
        sequence_output, __ = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)

        def gather_seq_out_by_pos(seq, pos):
            # Select the hidden states at the masked positions.
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))

        def gather_seq_out_by_pos_average(seq, pos, mask):
            # NOTE(review): not called in this forward; kept for compatibility.
            batch_size, max_token_num = pos.size(0), pos.size(-1)
            pos_vec = torch.gather(seq, 1, pos.view(batch_size, -1).unsqueeze(
                2).expand(-1, -1, seq.size(-1))).view(batch_size, -1, max_token_num, seq.size(-1))
            mask = mask.type_as(pos_vec)
            pos_vec_masked_sum = (
                pos_vec * mask.unsqueeze(3).expand_as(pos_vec)).sum(2)
            return pos_vec_masked_sum / mask.sum(2, keepdim=True).expand_as(pos_vec_masked_sum)

        def loss_mask_and_normalize(loss, mask):
            # Zero out padded positions; normalize by the number of real ones.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()
        if masked_lm_labels is None:
            if masked_pos is None:
                prediction_scores = self.cls(sequence_output)
            else:
                sequence_output_masked = gather_seq_out_by_pos(
                    sequence_output, masked_pos)
                prediction_scores = self.cls(sequence_output_masked)
            return prediction_scores
        sequence_output_masked = gather_seq_out_by_pos(
            sequence_output, masked_pos)
        prediction_scores_masked = self.cls(sequence_output_masked)
        if self.crit_mask_lm_smoothed:
            # The smoothed criterion expects log-probabilities.
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_lm_labels)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), masked_lm_labels)
        masked_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), masked_weights)
        return masked_lm_loss
class UnilmForSeq2SeqDecode(UnilmPreTrainedModel):
    def __init__(self, config, mask_word_id=0,
                 search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
                 forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0):
        """Decoding wrapper around the incremental UniLM encoder.

        Args:
            config: model configuration for the underlying encoder.
            mask_word_id: token id appended at the next position to predict.
            search_beam_size: beam width; values > 1 switch forward() to
                beam search.
            length_penalty: length normalization factor for beam search.
            eos_id / sos_id: sentence boundary token ids.
            forbid_duplicate_ngrams: if True, ban repeated n-grams in beams.
            forbid_ignore_set: token ids exempt from the n-gram ban.
            ngram_size: size of the banned n-grams.
            min_len: minimum generated length before EOS is allowed.
        """
        super(UnilmForSeq2SeqDecode, self).__init__(config)
        self.bert = UnilmModelIncr(config)
        self.cls = BertOnlyMLMHead(config)
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        self.mask_word_id = mask_word_id
        self.search_beam_size = search_beam_size
        self.length_penalty = length_penalty
        self.eos_id = eos_id
        self.sos_id = sos_id
        self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
        self.forbid_ignore_set = forbid_ignore_set
        self.ngram_size = ngram_size
        self.min_len = min_len
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        # Share the input embedding matrix with the LM output projection.
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)
    def forward(self, input_ids, token_type_ids, position_ids, attention_mask):
        """Greedy incremental decoding (delegates to beam_search when
        ``search_beam_size > 1``).

        At each step the current tokens plus one [MASK] slot are fed to the
        incremental encoder; the prediction at the mask becomes the next
        token.  Embeddings/encoder states (minus the mask slot) are cached in
        ``prev_embedding`` / ``prev_encoded_layers`` so each step only
        processes the newly generated token.

        Returns: (batch, output_length - input_length) tensor of token ids.
        """
        if self.search_beam_size > 1:
            return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask)
        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        # token_type_ids spans the full (source + target) length, so its
        # width fixes how many positions we decode.
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]
        output_ids = []
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length
        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]
            start_pos = next_pos - curr_length
            # Current tokens plus one mask slot at the position to predict.
            x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
            curr_token_type_ids = token_type_ids[:, start_pos:next_pos+1]
            curr_attention_mask = attention_mask[:,
                                                 start_pos:next_pos+1, :next_pos+1]
            curr_position_ids = position_ids[:, start_pos:next_pos+1]
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers)
            # Greedy pick: argmax over the LM head's scores at the mask slot.
            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores = self.cls(last_hidden)
            _, max_ids = torch.max(prediction_scores, dim=-1)
            output_ids.append(max_ids)
            # Extend the caches, dropping the mask slot ([:-1]) which will be
            # re-encoded next step once its real token is known.
            if prev_embedding is None:
                prev_embedding = new_embedding[:, :-1, :]
            else:
                prev_embedding = torch.cat(
                    (prev_embedding, new_embedding[:, :-1, :]), dim=1)
            if prev_encoded_layers is None:
                prev_encoded_layers = [x[:, :-1, :]
                                       for x in new_encoded_layers]
            else:
                prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                       for x in zip(prev_encoded_layers, new_encoded_layers)]
            curr_ids = max_ids
            next_pos += 1
        return torch.cat(output_ids, dim=1)
def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask):
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
K = self.search_beam_size
total_scores = []
beam_masks = []
step_ids = []
step_back_ptrs = []
partial_seqs = []
forbid_word_mask = None
buf_matrix = None
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores = self.cls(last_hidden)
log_scores = torch.nn.functional.log_softmax(
prediction_scores, dim=-1)
if forbid_word_mask is not None:
log_scores += (forbid_word_mask * -10000.0)
if self.min_len and (next_pos-input_length+1 <= self.min_len):
log_scores[:, :, self.eos_id].fill_(-10000.0)
kk_scores, kk_ids = torch.topk(log_scores, k=K)
if len(total_scores) == 0:
k_ids = torch.reshape(kk_ids, [batch_size, K])
back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
k_scores = torch.reshape(kk_scores, [batch_size, K])
else:
last_eos = torch.reshape(
beam_masks[-1], [batch_size * K, 1, 1])
last_seq_scores = torch.reshape(
total_scores[-1], [batch_size * K, 1, 1])
kk_scores += last_eos * (-10000.0) + last_seq_scores
kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
k_scores, k_ids = torch.topk(kk_scores, k=K)
back_ptrs = torch.div(k_ids, K)
kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
k_ids = torch.gather(kk_ids, 1, k_ids)
step_back_ptrs.append(back_ptrs)
step_ids.append(k_ids)
beam_masks.append(torch.eq(k_ids, self.eos_id).float())
total_scores.append(k_scores)
def first_expand(x):
input_shape = list(x.size())
expanded_shape = input_shape[:1] + [1] + input_shape[1:]
x = torch.reshape(x, expanded_shape)
repeat_count = [1, K] + [1] * (len(input_shape) - 1)
x = x.repeat(*repeat_count)
x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
return x
def select_beam_items(x, ids):
id_shape = list(ids.size())
id_rank = len(id_shape)
assert len(id_shape) == 2
x_shape = list(x.size())
x = torch.reshape(x, [batch_size, K] + x_shape[1:])
x_rank = len(x_shape) + 1
assert x_rank >= 2
if id_rank < x_rank:
ids = torch.reshape(
ids, id_shape + [1] * (x_rank - id_rank))
ids = ids.expand(id_shape + x_shape[1:])
y = torch.gather(x, 1, ids)
y = torch.reshape(y, x_shape)
return y
is_first = (prev_embedding is None)
if prev_embedding is None:
prev_embedding = first_expand(new_embedding[:, :-1, :])
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x[:, :-1, :]) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
curr_ids = torch.reshape(k_ids, [batch_size * K, 1])
if is_first:
token_type_ids = first_expand(token_type_ids)
position_ids = first_expand(position_ids)
attention_mask = first_expand(attention_mask)
mask_ids = first_expand(mask_ids)
if self.forbid_duplicate_ngrams:
wids = step_ids[-1].tolist()
ptrs = step_back_ptrs[-1].tolist()
if is_first:
partial_seqs = []
for b in range(batch_size):
for k in range(K):
partial_seqs.append([wids[b][k]])
else:
new_partial_seqs = []
for b in range(batch_size):
for k in range(K):
new_partial_seqs.append(
partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
partial_seqs = new_partial_seqs
def get_dup_ngram_candidates(seq, n):
cands = set()
if len(seq) < n:
return []
tail = seq[-(n-1):]
if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
return []
for i in range(len(seq) - (n - 1)):
mismatch = False
for j in range(n - 1):
if tail[j] != seq[i + j]:
mismatch = True
break
if (not mismatch) and not(self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
cands.add(seq[i + n - 1])
return list(sorted(cands))
if len(partial_seqs[0]) >= self.ngram_size:
dup_cands = []
for seq in partial_seqs:
dup_cands.append(
get_dup_ngram_candidates(seq, self.ngram_size))
if max(len(x) for x in dup_cands) > 0:
if buf_matrix is None:
vocab_size = list(log_scores.size())[-1]
buf_matrix = np.zeros(
(batch_size * K, vocab_size), dtype=float)
else:
buf_matrix.fill(0)
for bk, cands in enumerate(dup_cands):
for i, wid in enumerate(cands):
buf_matrix[bk, wid] = 1.0
forbid_word_mask = torch.tensor(
buf_matrix, dtype=log_scores.dtype)
forbid_word_mask = torch.reshape(
forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda()
else:
forbid_word_mask = None
next_pos += 1
total_scores = [x.tolist() for x in total_scores]
step_ids = [x.tolist() for x in step_ids]
step_back_ptrs = [x.tolist() for x in step_back_ptrs]
traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
for b in range(batch_size):
scores = [x[b] for x in total_scores]
wids_list = [x[b] for x in step_ids]
ptrs = [x[b] for x in step_back_ptrs]
traces['scores'].append(scores)
traces['wids'].append(wids_list)
traces['ptrs'].append(ptrs)
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid == self.eos_id for wid in wids):
last_frame_id = i
break
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if wid == self.eos_id or fid == last_frame_id:
s = scores[fid][i]
if self.length_penalty > 0:
s /= math.pow((5 + fid + 1) / 6.0,
self.length_penalty)
if s > max_score:
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
traces['pred_seq'].append([0])
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
traces['pred_seq'].append(seq)
def _pad_sequence(sequences, max_len, padding_value=0):
trailing_dims = sequences[0].size()[1:]
out_dims = (len(sequences), max_len) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
out_tensor[i, :length, ...] = tensor
return out_tensor
for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
ts_list = traces[k]
if not isinstance(ts_list[0], torch.Tensor):
dt = torch.float if k == 'scores' else torch.long
ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
traces[k] = _pad_sequence(
ts_list, output_length, padding_value=0).to(input_ids.device)
return traces
| 31,656
| 44.095442
| 238
|
py
|
Unilm
|
Unilm-master/run_seq2seq.py
|
# coding=utf-8
import os
import logging
import glob
import math
import json
import argparse
import random
from pathlib import Path
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import RandomSampler
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
from tokenization_unilm import UnilmTokenizer, WhitespaceTokenizer
from modeling_unilm import UnilmForSeq2Seq, UnilmConfig
from transformers import AdamW, get_linear_schedule_with_warmup
import utils_seq2seq
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys())
for conf in (UnilmConfig,)), ())
MODEL_CLASSES = {
'unilm': (UnilmConfig, UnilmForSeq2Seq, UnilmTokenizer)
}
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def _get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
if (not fn_model_list) or (not fn_optim_list):
return None
both_set = set([int(Path(fn).stem.split('.')[-1]) for fn in fn_model_list]
) & set([int(Path(fn).stem.split('.')[-1]) for fn in fn_optim_list])
if both_set:
return max(both_set)
else:
return None
def main():
    """Fine-tune a UniLM model for sequence-to-sequence generation.

    Parses command-line arguments, sets up (optionally distributed) training,
    recovers model/optimizer/scheduler state from prior checkpoints when
    available, and runs the masked-LM training loop, saving a checkpoint set
    (model/optim[/amp]/sched) at the end of every epoch.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--src_file", default=None, type=str,
                        help="The input data file name.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--log_dir", default='', type=str,
                        help="The output directory where the log will be written.")
    parser.add_argument("--model_recover_path", default=None, type=str,
                        help="The file of fine-tuned pretraining model.")
    parser.add_argument("--optim_recover_path", default=None, type=str,
                        help="The file of pretraining optimizer.")
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    # Other parameters
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument('--max_position_embeddings', type=int, default=None,
                        help="max position embeddings")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size", default=32, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size", default=64, type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--label_smoothing", default=0, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.01, type=float,
                        help="The weight decay rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--hidden_dropout_prob", default=0.1, type=float,
                        help="Dropout rate for hidden states.")
    parser.add_argument("--attention_probs_dropout_prob", default=0.1, type=float,
                        help="Dropout rate for attention probabilities.")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--tokenized_input', action='store_true',
                        help="Whether the input is tokenized.")
    parser.add_argument('--max_len_a', type=int, default=0,
                        help="Truncate_config: maximum length of segment A.")
    parser.add_argument('--max_len_b', type=int, default=0,
                        help="Truncate_config: maximum length of segment B.")
    parser.add_argument('--trunc_seg', default='',
                        help="Truncate_config: first truncate segment A/B (option: a, b).")
    parser.add_argument('--always_truncate_tail', action='store_true',
                        help="Truncate_config: Whether we should always truncate tail.")
    parser.add_argument("--mask_prob", default=0.20, type=float,
                        help="Number of prediction is sometimes less than max_pred when sequence is short.")
    parser.add_argument("--mask_prob_eos", default=0, type=float,
                        help="Number of prediction is sometimes less than max_pred when sequence is short.")
    parser.add_argument('--max_pred', type=int, default=20,
                        help="Max tokens of prediction.")
    parser.add_argument("--num_workers", default=0, type=int,
                        help="Number of workers for the data loader.")
    parser.add_argument('--mask_source_words', action='store_true',
                        help="Whether to mask source words for training")
    parser.add_argument('--skipgram_prb', type=float, default=0.0,
                        help='prob of ngram mask')
    parser.add_argument('--skipgram_size', type=int, default=1,
                        help='the max size of ngram mask')
    parser.add_argument('--mask_whole_word', action='store_true',
                        help="Whether masking a whole word.")
    args = parser.parse_args()
    # Treat a missing recovery file the same as no recovery path at all.
    if not(args.model_recover_path and Path(args.model_recover_path).exists()):
        args.model_recover_path = None
    # Expand the [PT_OUTPUT_DIR] placeholder from the environment (cluster runs).
    args.output_dir = args.output_dir.replace(
        '[PT_OUTPUT_DIR]', os.getenv('PT_OUTPUT_DIR', ''))
    args.log_dir = args.log_dir.replace(
        '[PT_OUTPUT_DIR]', os.getenv('PT_OUTPUT_DIR', ''))
    os.makedirs(args.output_dir, exist_ok=True)
    if args.log_dir:
        os.makedirs(args.log_dir, exist_ok=True)
    # Persist the full run configuration for reproducibility.
    json.dump(args.__dict__, open(os.path.join(
        args.output_dir, 'opt.json'), 'w'), sort_keys=True, indent=2)
    # Device setup: single-process (possibly multi-GPU DataParallel) vs.
    # one-device-per-process distributed training.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        dist.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    # Per-step micro-batch; gradients are accumulated back to the full batch.
    args.train_batch_size = int(
        args.train_batch_size / args.gradient_accumulation_steps)
    # Seed every RNG for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")
    if args.local_rank not in (-1, 0):
        # Make sure only the first process in distributed training will download model & vocab
        dist.barrier()
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path, max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing)
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    # Pre-tokenized input is only split on whitespace; raw text uses WordPiece.
    data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer
    if args.local_rank == 0:
        dist.barrier()
    if args.do_train:
        print("Loading Train Dataset", args.data_dir)
        # NOTE(review): mask_source_words is hard-coded to False here, so the
        # --mask_source_words flag has no effect — confirm this is intended.
        bi_uni_pipeline = [utils_seq2seq.Preprocess4Seq2seq(args.max_pred, args.mask_prob, list(tokenizer.vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length, mask_source_words=False, skipgram_prb=args.skipgram_prb, skipgram_size=args.skipgram_size, mask_whole_word=args.mask_whole_word, tokenizer=data_tokenizer)]
        file = os.path.join(
            args.data_dir, args.src_file if args.src_file else 'train.tgt')
        train_dataset = utils_seq2seq.Seq2SeqDataset(
            file, args.train_batch_size, data_tokenizer, args.max_seq_length, bi_uni_pipeline=bi_uni_pipeline)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset, replacement=False)
            _batch_size = args.train_batch_size
        else:
            # Each distributed worker sees a disjoint shard of the data.
            train_sampler = DistributedSampler(train_dataset)
            _batch_size = args.train_batch_size // dist.get_world_size()
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=_batch_size, sampler=train_sampler,
                                                       num_workers=args.num_workers, collate_fn=utils_seq2seq.batch_list_to_batch_tensors, pin_memory=False)
    # note: args.train_batch_size has been changed to (/= args.gradient_accumulation_steps)
    # t_total = int(math.ceil(len(train_dataset.ex_list) / args.train_batch_size)
    # Total number of optimizer updates over the whole run (for the scheduler).
    t_total = int(len(train_dataloader) * args.num_train_epochs /
                  args.gradient_accumulation_steps)
    # Prepare model
    recover_step = _get_max_epoch_model(args.output_dir)
    if args.local_rank not in (-1, 0):
        # Make sure only the first process in distributed training will download model & vocab
        dist.barrier()
    global_step = 0
    if (recover_step is None) and (args.model_recover_path is None):
        model_recover = None
    else:
        if recover_step:
            # Resume from the latest complete checkpoint in output_dir.
            logger.info("***** Recover model: %d *****", recover_step)
            model_recover = torch.load(os.path.join(
                args.output_dir, "model.{0}.bin".format(recover_step)), map_location='cpu')
            # recover_step == number of epochs
            global_step = math.floor(
                recover_step * t_total / args.num_train_epochs)
        elif args.model_recover_path:
            logger.info("***** Recover model: %s *****",
                        args.model_recover_path)
            model_recover = torch.load(
                args.model_recover_path, map_location='cpu')
    model = model_class.from_pretrained(
        args.model_name_or_path, state_dict=model_recover, config=config)
    if args.local_rank == 0:
        dist.barrier()
    model.to(device)
    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    # Bias and LayerNorm parameters are conventionally excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # NOTE(review): weight decay is hard-coded to 0.01 below; the
    # --weight_decay argument is not used here — confirm.
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(
            nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(
            nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion*t_total), num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(
            model, optimizer, opt_level=args.fp16_opt_level)
    if args.local_rank != -1:
        try:
            from torch.nn.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("DistributedDataParallel")
        model = DDP(model, device_ids=[
            args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    if recover_step:
        # Restore optimizer / amp / scheduler state matching the recovered model.
        logger.info("***** Recover optimizer: %d *****", recover_step)
        optim_recover = torch.load(os.path.join(
            args.output_dir, "optim.{0}.bin".format(recover_step)), map_location='cpu')
        if hasattr(optim_recover, 'state_dict'):
            optim_recover = optim_recover.state_dict()
        optimizer.load_state_dict(optim_recover)
        # NOTE(review): the amp recovery below runs even when --fp16 is off,
        # where `amp` was never imported (NameError) — confirm / guard.
        logger.info("***** Recover amp: %d *****", recover_step)
        amp_recover = torch.load(os.path.join(
            args.output_dir, "amp.{0}.bin".format(recover_step)), map_location='cpu')
        amp.load_state_dict(amp_recover)
        logger.info("***** Recover scheduler: %d *****", recover_step)
        scheduler_recover = torch.load(os.path.join(
            args.output_dir, "sched.{0}.bin".format(recover_step)), map_location='cpu')
        scheduler.load_state_dict(scheduler_recover)
    logger.info("***** CUDA.empty_cache() *****")
    torch.cuda.empty_cache()
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", t_total)
        model.train()
        if recover_step:
            start_epoch = recover_step+1
        else:
            start_epoch = 1
        for i_epoch in trange(start_epoch, int(args.num_train_epochs)+1, desc="Epoch", disable=args.local_rank not in (-1, 0)):
            if args.local_rank != -1:
                # Re-shuffle the distributed shards differently every epoch.
                train_sampler.set_epoch(i_epoch)
            iter_bar = tqdm(train_dataloader, desc='Iter (loss=X.XXX)',
                            disable=args.local_rank not in (-1, 0))
            for step, batch in enumerate(iter_bar):
                batch = [
                    t.to(device) if t is not None else None for t in batch]
                input_ids, segment_ids, input_mask, lm_label_ids, masked_pos, masked_weights, _ = batch
                masked_lm_loss = model(input_ids, segment_ids, input_mask, lm_label_ids,
                                       masked_pos=masked_pos, masked_weights=masked_weights)
                if n_gpu > 1:    # mean() to average on multi-gpu.
                    # loss = loss.mean()
                    masked_lm_loss = masked_lm_loss.mean()
                loss = masked_lm_loss
                # logging for each step (i.e., before normalization by args.gradient_accumulation_steps)
                iter_bar.set_description('Iter (loss=%5.3f)' % loss.item())
                # ensure that accumlated gradients are normalized
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    # Scale the loss so fp16 gradients don't underflow, then
                    # clip the unscaled master gradients.
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    torch.nn.utils.clip_grad_norm_(
                        amp.master_params(optimizer), args.max_grad_norm)
                else:
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(
                        model.parameters(), args.max_grad_norm)
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    scheduler.step()  # Update learning rate schedule
                    optimizer.zero_grad()
                    global_step += 1
            # Save a trained model
            if (args.local_rank == -1 or torch.distributed.get_rank() == 0):
                # Only rank 0 writes checkpoints; other ranks hold replicas.
                logger.info(
                    "** ** * Saving fine-tuned model and optimizer ** ** * ")
                model_to_save = model.module if hasattr(
                    model, 'module') else model  # Only save the model it-self
                output_model_file = os.path.join(
                    args.output_dir, "model.{0}.bin".format(i_epoch))
                torch.save(model_to_save.state_dict(), output_model_file)
                output_optim_file = os.path.join(
                    args.output_dir, "optim.{0}.bin".format(i_epoch))
                torch.save(optimizer.state_dict(), output_optim_file)
                if args.fp16:
                    output_amp_file = os.path.join(
                        args.output_dir, "amp.{0}.bin".format(i_epoch))
                    torch.save(amp.state_dict(), output_amp_file)
                output_sched_file = os.path.join(
                    args.output_dir, "sched.{0}.bin".format(i_epoch))
                torch.save(scheduler.state_dict(), output_sched_file)
                logger.info("***** CUDA.empty_cache() *****")
                torch.cuda.empty_cache()
# Script entry point: run fine-tuning when invoked directly.
if __name__ == "__main__":
    main()
| 19,869
| 50.343669
| 330
|
py
|
Unilm
|
Unilm-master/configuration_unilm.py
|
# coding=utf-8
""" UniLM model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'unilm-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/unilm/unilm-large-cased-config.json",
'unilm-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/unilm/unilm-base-cased-config.json"
}
class UnilmConfig(PretrainedConfig):
    """Configuration class for UniLM models.

    The first argument is overloaded the same way as in classic BERT configs:
    pass an ``int`` to set the vocabulary size together with the individual
    hyper-parameters, or a ``str`` path to a JSON file whose key/value pairs
    are copied verbatim onto the instance.

    Arguments:
        vocab_size: Vocabulary size of `inputs_ids` (int), or path to a
            pretrained model config file (str).
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_attention_heads: Number of attention heads for each attention
            layer in the Transformer encoder.
        intermediate_size: Size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        hidden_act: Non-linear activation function (function or string) in
            the encoder and pooler; "gelu", "relu", "swish" and "gelu_new"
            are supported as strings.
        hidden_dropout_prob: Dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: Dropout ratio for the attention
            probabilities.
        max_position_embeddings: Maximum sequence length this model might
            ever be used with (typically 512, 1024 or 2048).
        type_vocab_size: Vocabulary size of the `token_type_ids`.
        initializer_range: Stddev of the truncated-normal initializer for
            all weight matrices.
        layer_norm_eps: Epsilon used by LayerNorm.
    """
    pretrained_config_archive_map = UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size=28996,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=6,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super().__init__(**kwargs)
        if isinstance(vocab_size, str):
            # JSON config path: copy every key/value pair onto the instance.
            with open(vocab_size, "r", encoding='utf-8') as reader:
                loaded = json.loads(reader.read())
            for key, value in loaded.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size, int):
            # Explicit hyper-parameters.
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
| 4,038
| 44.382022
| 116
|
py
|
Unilm
|
Unilm-master/decode_seq2seq.py
|
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import argparse
import math
import random
from tqdm import tqdm, trange
import pickle
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tokenization_unilm import UnilmTokenizer, WhitespaceTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup
from modeling_unilm import UnilmForSeq2SeqDecode, UnilmConfig
# from transformers import (UnilmTokenizer, WhitespaceTokenizer,
# UnilmForSeq2SeqDecode, AdamW, UnilmConfig)
import utils_seq2seq
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys())
for conf in (UnilmConfig,)), ())
MODEL_CLASSES = {
'unilm': (UnilmConfig, UnilmForSeq2SeqDecode, UnilmTokenizer)
}
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def detokenize(tk_list):
    """Merge WordPiece continuation tokens ("##x") onto the preceding token.

    A leading "##" token with nothing before it is kept as-is.
    """
    merged = []
    for token in tk_list:
        if merged and token.startswith('##'):
            # Continuation piece: glue onto the previous word, dropping "##".
            merged[-1] += token[2:]
        else:
            merged.append(token)
    return merged
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--model_recover_path", default=None, type=str,
help="The file of fine-tuned pretraining model.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--max_seq_length", default=512, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
# decoding parameters
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--input_file", type=str, help="Input file")
parser.add_argument('--subset', type=int, default=0,
help="Decode a subset of the input dataset.")
parser.add_argument("--output_file", type=str, help="output file")
parser.add_argument("--split", type=str, default="",
help="Data split (train/val/test).")
parser.add_argument('--tokenized_input', action='store_true',
help="Whether the input is tokenized.")
parser.add_argument('--seed', type=int, default=123,
help="random seed for initialization")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--batch_size', type=int, default=4,
help="Batch size for decoding.")
parser.add_argument('--beam_size', type=int, default=1,
help="Beam size for searching")
parser.add_argument('--length_penalty', type=float, default=0,
help="Length penalty for beam search")
parser.add_argument('--forbid_duplicate_ngrams', action='store_true')
parser.add_argument('--forbid_ignore_word', type=str, default=None,
help="Forbid the word during forbid_duplicate_ngrams")
parser.add_argument("--min_len", default=None, type=int)
parser.add_argument('--need_score_traces', action='store_true')
parser.add_argument('--ngram_size', type=int, default=3)
parser.add_argument('--max_tgt_length', type=int, default=128,
help="maximum length of target sequence")
args = parser.parse_args()
if args.need_score_traces and args.beam_size <= 1:
raise ValueError(
"Score trace is only available for beam search with beam size > 1.")
if args.max_tgt_length >= args.max_seq_length - 2:
raise ValueError("Maximum tgt length exceeds max seq length - 2.")
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path, max_position_embeddings=args.max_seq_length)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
bi_uni_pipeline = []
bi_uni_pipeline.append(utils_seq2seq.Preprocess4Seq2seqDecode(list(tokenizer.vocab.keys()), tokenizer.convert_tokens_to_ids,
args.max_seq_length, max_tgt_length=args.max_tgt_length))
# Prepare model
mask_word_id, eos_word_ids, sos_word_id = tokenizer.convert_tokens_to_ids(
["[MASK]", "[SEP]", "[S2S_SOS]"])
forbid_ignore_set = None
if args.forbid_ignore_word:
w_list = []
for w in args.forbid_ignore_word.split('|'):
if w.startswith('[') and w.endswith(']'):
w_list.append(w.upper())
else:
w_list.append(w)
forbid_ignore_set = set(tokenizer.convert_tokens_to_ids(w_list))
print(args.model_recover_path)
for model_recover_path in glob.glob(args.model_recover_path.strip()):
logger.info("***** Recover model: %s *****", model_recover_path)
model_recover = torch.load(model_recover_path)
model = model_class.from_pretrained(args.model_name_or_path, state_dict=model_recover, config=config, mask_word_id=mask_word_id, search_beam_size=args.beam_size, length_penalty=args.length_penalty,
eos_id=eos_word_ids, sos_id=sos_word_id, forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set, ngram_size=args.ngram_size, min_len=args.min_len)
del model_recover
model.to(device)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model = amp.initialize(model, opt_level=args.fp16_opt_level)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
torch.cuda.empty_cache()
model.eval()
next_i = 0
max_src_length = args.max_seq_length - 2 - args.max_tgt_length
with open(args.input_file, encoding="utf-8") as fin:
input_lines = [x.strip() for x in fin.readlines()]
if args.subset > 0:
logger.info("Decoding subset: %d", args.subset)
input_lines = input_lines[:args.subset]
data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer
input_lines = [data_tokenizer.tokenize(
x)[:max_src_length] for x in input_lines]
input_lines = sorted(list(enumerate(input_lines)),
key=lambda x: -len(x[1]))
output_lines = [""] * len(input_lines)
score_trace_list = [None] * len(input_lines)
total_batch = math.ceil(len(input_lines) / args.batch_size)
with tqdm(total=total_batch) as pbar:
while next_i < len(input_lines):
_chunk = input_lines[next_i:next_i + args.batch_size]
buf_id = [x[0] for x in _chunk]
buf = [x[1] for x in _chunk]
next_i += args.batch_size
max_a_len = max([len(x) for x in buf])
instances = []
for instance in [(x, max_a_len) for x in buf]:
for proc in bi_uni_pipeline:
instances.append(proc(instance))
with torch.no_grad():
batch = utils_seq2seq.batch_list_to_batch_tensors(
instances)
batch = [
t.to(device) if t is not None else None for t in batch]
input_ids, token_type_ids, position_ids, input_mask = batch
traces = model(input_ids, token_type_ids,
position_ids, input_mask)
if args.beam_size > 1:
traces = {k: v.tolist() for k, v in traces.items()}
output_ids = traces['pred_seq']
else:
output_ids = traces.tolist()
for i in range(len(buf)):
w_ids = output_ids[i]
output_buf = tokenizer.convert_ids_to_tokens(w_ids)
output_tokens = []
for t in output_buf:
if t in ("[SEP]", "[PAD]"):
break
output_tokens.append(t)
output_sequence = ' '.join(detokenize(output_tokens))
output_lines[buf_id[i]] = output_sequence
if args.need_score_traces:
score_trace_list[buf_id[i]] = {
'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]}
pbar.update(1)
if args.output_file:
fn_out = args.output_file
else:
fn_out = model_recover_path+'.'+args.split
with open(fn_out, "w", encoding="utf-8") as fout:
for l in output_lines:
fout.write(l)
fout.write("\n")
if args.need_score_traces:
with open(fn_out + ".trace.pickle", "wb") as fout_trace:
pickle.dump(
{"version": 0.0, "num_samples": len(input_lines)}, fout_trace)
for x in score_trace_list:
pickle.dump(x, fout_trace)
if __name__ == "__main__":
main()
| 12,437
| 46.473282
| 225
|
py
|
Unilm
|
Unilm-master/convert_unilm_pytorch_checkpoint_to_original_tf.py
|
"""
@author: liucong
@contact: logcongcong@gmail.com
@time: 2020/7/27 13:53
"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from modeling_unilm import UnilmForLM
def convert_pytorch_checkpoint_to_tf(model: UnilmForLM, ckpt_dir: str, model_name: str):
    """Export the weights of a PyTorch UniLM model as a TensorFlow 1.x checkpoint.

    Each entry of ``model.state_dict()`` is renamed to the TF variable naming
    convention, transposed where PyTorch stores the matrix in the opposite
    layout, materialized as a TF variable, and finally saved via
    ``tf.train.Saver`` into ``ckpt_dir``.
    """
    # Weight matrices PyTorch stores as (out, in); TF expects (in, out), so
    # any variable whose name contains one of these substrings is transposed.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # Name rewrites applied in order; note "." -> "/" runs before the
    # LayerNorm gamma/beta renames, which match on the already-rewritten "/".
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        # Apply every (pattern, replacement) pair in var_map, in order.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return "{}".format(name)
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        # Create a zero-initialized TF variable with the tensor's shape/dtype;
        # the actual values are written afterwards via set_value.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any([x in var_name for x in tensors_to_transpose]):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            # Sanity check: the value read back from TF must match the source.
            print("Successfully created {}: {}".format(tf_name, np.allclose(tf_weight, torch_tensor)))
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point: load a PyTorch UniLM checkpoint and export it to TF.

    *raw_args* may be a list of argument strings (defaults to sys.argv).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    arg_parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    arg_parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    arg_parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    parsed = arg_parser.parse_args(raw_args)
    unilm = UnilmForLM.from_pretrained(
        pretrained_model_name_or_path=parsed.model_name,
        state_dict=torch.load(parsed.pytorch_model_path),
        cache_dir=parsed.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=unilm, ckpt_dir=parsed.tf_cache_dir, model_name=parsed.model_name)
if __name__ == "__main__":
main()
| 3,081
| 35.258824
| 118
|
py
|
Unilm
|
Unilm-master/__init__.py
|
"""
@author: liucong
@contact: logcongcong@gmail.com
@time: 2020/6/16 15:44
"""
| 79
| 15
| 31
|
py
|
Unilm
|
Unilm-master/utils_seq2seq.py
|
# coding=utf-8
from random import randint, shuffle, choice
from random import random as rand
import math
import numpy as np
import torch
import torch.utils.data
def get_random_word(vocab_words):
    """Return a word drawn uniformly at random from *vocab_words*."""
    return vocab_words[randint(0, len(vocab_words) - 1)]
def batch_list_to_batch_tensors(batch):
    """Collate a list of per-example tuples into per-field batch tensors.

    For each field position across the batch:
      * ``None`` in the first example -> the whole field becomes ``None``;
      * tensors are stacked along a new batch dimension;
      * anything else is tensorized as ``torch.long``; fields that cannot be
        tensorized (e.g. raw strings) become ``None``.

    Returns a list with one entry (tensor or None) per field.
    """
    batch_tensors = []
    for x in zip(*batch):
        if x[0] is None:
            batch_tensors.append(None)
        elif isinstance(x[0], torch.Tensor):
            batch_tensors.append(torch.stack(x))
        else:
            try:
                batch_tensors.append(torch.tensor(x, dtype=torch.long))
            except (TypeError, ValueError, RuntimeError):
                # Bug fix: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit; only tensor-construction
                # failures should fall back to None.
                batch_tensors.append(None)
    return batch_tensors
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
class Pipeline():
    """ Pre-process Pipeline Class : callable.

    Base class for the Preprocess4* instances below: holds the masking
    configuration and provides span selection (`get_masked_pos`) and
    token replacement (`replace_masked_tokens`) for masked-LM training.
    """
    def __init__(self):
        super().__init__()
        # Masking configuration; subclasses/callers populate these fields.
        self.skipgram_prb = None
        self.skipgram_size = None
        self.pre_whole_word = None
        self.mask_whole_word = None
        self.vocab_words = None
        self.call_count = 0
        self.offline_mode = False
        self.skipgram_size_geo_list = None
        self.span_same_mask = False
    def init_skipgram_size_geo_list(self, p):
        """Precompute a truncated, normalized geometric distribution over
        span sizes 1..skipgram_size with success probability *p*."""
        if p > 0:
            g_list = []
            t = p
            for _ in range(self.skipgram_size):
                g_list.append(t)
                t *= (1-p)
            s = sum(g_list)
            self.skipgram_size_geo_list = [x/s for x in g_list]
    def __call__(self, instance):
        raise NotImplementedError
    # pre_whole_word: tokenize to words before masking
    # post whole word (--mask_whole_word): expand to words after masking
    def get_masked_pos(self, tokens, n_pred, add_skipgram=False, mask_segment=None, protect_range=None):
        """Pick up to *n_pred* token positions to mask.

        Candidate spans are whole words when ``pre_whole_word`` is set,
        otherwise single tokens; special tokens ([CLS]/[SEP]) are never
        masked.  ``mask_segment`` ('a'/'b') restricts candidates to one side
        of the first [SEP]; ``protect_range`` excludes a half-open index
        range.  Returns a list of masked positions (truncated to n_pred).
        """
        if self.pre_whole_word:
            pre_word_split = _get_word_split_index(tokens, 0, len(tokens))
        else:
            pre_word_split = list(range(0, len(tokens)+1))
        span_list = list(zip(pre_word_split[:-1], pre_word_split[1:]))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        if mask_segment:
            # Locate the first single-token span ending in 'SEP]' — the
            # boundary between segment a and segment b.
            for i, sp in enumerate(span_list):
                sp_st, sp_end = sp
                if (sp_end-sp_st == 1) and tokens[sp_st].endswith('SEP]'):
                    segment_index = i
                    break
        for i, sp in enumerate(span_list):
            sp_st, sp_end = sp
            if (sp_end-sp_st == 1) and (tokens[sp_st].endswith('CLS]') or tokens[sp_st].endswith('SEP]')):
                special_pos.add(i)
            else:
                if mask_segment:
                    if ((i < segment_index) and ('a' in mask_segment)) or ((i > segment_index) and ('b' in mask_segment)):
                        cand_pos.append(i)
                else:
                    cand_pos.append(i)
        shuffle(cand_pos)
        masked_pos = set()
        for i_span in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            cand_st, cand_end = span_list[i_span]
            # Skip spans that would overshoot the budget or overlap a
            # previously selected span.
            if len(masked_pos)+cand_end-cand_st > n_pred:
                continue
            if any(p in masked_pos for p in range(cand_st, cand_end)):
                continue
            n_span = 1
            rand_skipgram_size = 0
            # ngram
            if self.skipgram_size_geo_list:
                # sampling ngram size from geometric distribution
                rand_skipgram_size = np.random.choice(
                    len(self.skipgram_size_geo_list), 1, p=self.skipgram_size_geo_list)[0] + 1
            else:
                if add_skipgram and (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                    rand_skipgram_size = min(
                        randint(2, self.skipgram_size), len(span_list)-i_span)
            # Extend the span to up to rand_skipgram_size consecutive words,
            # stopping at special tokens or when the mask budget is exceeded.
            for n in range(2, rand_skipgram_size+1):
                tail_st, tail_end = span_list[i_span+n-1]
                if (tail_end-tail_st == 1) and (tail_st in special_pos):
                    break
                if len(masked_pos)+tail_end-cand_st > n_pred:
                    break
                n_span = n
            st_span, end_span = i_span, i_span + n_span
            if self.mask_whole_word:
                # pre_whole_word==False: position index of span_list is the same as tokens
                st_span, end_span = _expand_whole_word(
                    tokens, st_span, end_span)
            skip_pos = None
            for sp in range(st_span, end_span):
                for mp in range(span_list[sp][0], span_list[sp][1]):
                    if not(skip_pos and (mp in skip_pos)) and (mp not in special_pos) and not(protect_range and (protect_range[0] <= mp < protect_range[1])):
                        masked_pos.add(mp)
        if len(masked_pos) < n_pred:
            # Budget not reached via spans: top up with individual candidate
            # positions.  NOTE(review): these are span indices, not token
            # indices — when pre_whole_word is set they may not coincide.
            shuffle(cand_pos)
            for pos in cand_pos:
                if len(masked_pos) >= n_pred:
                    break
                if pos not in masked_pos:
                    masked_pos.add(pos)
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            # shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        return masked_pos
    def replace_masked_tokens(self, tokens, masked_pos):
        """Apply BERT-style replacement in place: 80% '[MASK]', 10% a random
        vocabulary word, 10% unchanged.  With ``span_same_mask`` set,
        consecutive masked positions reuse the previous draw so a whole span
        gets the same treatment."""
        if self.span_same_mask:
            masked_pos = sorted(list(masked_pos))
        prev_pos, prev_rand = None, None
        for pos in masked_pos:
            if self.span_same_mask and (pos-1 == prev_pos):
                t_rand = prev_rand
            else:
                t_rand = rand()
            if t_rand < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif t_rand < 0.9:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
            prev_pos, prev_rand = pos, t_rand
# Input file format :
# 1. One sentence per line. These should ideally be actual sentences,
# not entire paragraphs or arbitrary spans of text. (Because we use
# the sentence boundaries for the "next sentence prediction" task).
# 2. Blank lines between documents. Document boundaries are needed
# so that the "next sentence prediction" task doesn't span between documents.
def truncate_tokens_pair(tokens_a, tokens_b, max_len):
    """Trim the longer of the two token lists (one token at a time) until the
    pair fits in max_len - 3, leaving room for [CLS] and two [SEP] tokens."""
    budget = max_len - 3
    while len(tokens_a) + len(tokens_b) > budget:
        if len(tokens_a) > len(tokens_b):
            tokens_a = tokens_a[:-1]
        else:
            tokens_b = tokens_b[:-1]
    return tokens_a, tokens_b
def truncate_tokens_signle(tokens_a, max_len):
    """Clip a single token list to max_len - 2, leaving room for the [CLS]
    and [SEP] special tokens.  (Name typo kept for caller compatibility.)"""
    limit = max_len - 2
    return tokens_a[:limit] if len(tokens_a) > limit else tokens_a
from functools import partial
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
class Seq2SeqDataset(torch.utils.data.Dataset):
    """Load (src_text, tgt_text) pairs from a corpus file.

    Each corpus line is a Python-literal dict with keys ``src_text`` and
    ``tgt_text``; lines are parsed in parallel with a process pool.
    ``__getitem__`` runs every preprocessor in ``bi_uni_pipeline`` on the raw
    pair and concatenates their output tuples.
    """

    def __init__(self, file, batch_size, tokenizer, max_len, short_sampling_prob=0.1, sent_reverse_order=False, bi_uni_pipeline=[]):
        super().__init__()
        self.tokenizer = tokenizer  # tokenize function
        self.max_len = max_len  # maximum length of tokens
        self.short_sampling_prob = short_sampling_prob
        self.bi_uni_pipeline = bi_uni_pipeline
        self.batch_size = batch_size
        self.sent_reverse_order = sent_reverse_order
        # Bug fix: the original called readlines() twice on the same handle —
        # the second call (used for tqdm's `total`) always returned [] because
        # the file was already exhausted — and never closed the file.
        with open(file, "r", encoding='utf-8') as fin:
            lines = fin.readlines()
        threads = min(8, cpu_count())
        with Pool(threads) as p:
            annotate_ = partial(
                self.read_data,
                tokenizer=self.tokenizer)
            self.ex_list = list(
                tqdm(
                    p.imap(annotate_, lines, chunksize=32),
                    total=len(lines),
                    desc="convert squad examples to features",
                )
            )
        print('Load {0} documents'.format(len(self.ex_list)))

    def read_data(self, line, tokenizer):
        # NOTE(review): eval() executes arbitrary code from the corpus line —
        # only use with trusted input.  Kept as-is because literal_eval would
        # reject some inputs the original accepted.
        sample = eval(line.strip())
        return (sample["src_text"], sample["tgt_text"])

    def __len__(self):
        return len(self.ex_list)

    def __getitem__(self, idx):
        instance = self.ex_list[idx]
        # Concatenate the outputs of every preprocessor in the pipeline.
        new_instance = ()
        for proc in self.bi_uni_pipeline:
            new_instance += proc(instance)
        return new_instance

    def __iter__(self):  # iterator to load data
        # Yields randomly sampled (with replacement) collated batches.
        for __ in range(math.ceil(len(self.ex_list) / float(self.batch_size))):
            batch = []
            for __ in range(self.batch_size):
                idx = randint(0, len(self.ex_list)-1)
                batch.append(self.__getitem__(idx))
            # To Tensor
            yield batch_list_to_batch_tensors(batch)
class Preprocess4Seq2seq(Pipeline):
    """ Pre-processing steps for pretraining transformer.

    Seq2seq variant: converts a (src_text, tgt_text) pair into padded model
    inputs with a seq2seq attention mask (source fully visible, target
    causal) plus masked-LM targets.
    """
    def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, mask_whole_word=False, mask_source_words=True, tokenizer=None):
        super().__init__()
        self.max_len = max_len
        self.max_pred = max_pred  # max tokens of prediction
        self.mask_prob = mask_prob  # masking probability
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_whole_word = mask_whole_word
        self.mask_source_words = mask_source_words
        self.tokenizer = tokenizer
    def __call__(self, instance):
        """Return (input_ids, segment_ids, input_mask, masked_ids, masked_pos,
        masked_weights, next_sentence_label) for one (src, tgt) text pair."""
        next_sentence_label = None
        tokens_a, tokens_b = instance[:2]
        tokens_a = self.tokenizer.tokenize(tokens_a)
        tokens_b = self.tokenizer.tokenize(tokens_b)
        # -3 for special tokens [CLS], [SEP], [SEP]
        tokens_a, tokens_b = truncate_tokens_pair(tokens_a, tokens_b, self.max_len)
        # Add Special Tokens
        tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']
        # NOTE(review): segment ids 4/5 presumably select the seq2seq task's
        # segment embeddings (cf. 0/1 bi, 2 right, 3 left below) — confirm
        # against the model's type_vocab_size.
        segment_ids = [4]*(len(tokens_a)+2) + [5]*(len(tokens_b)+1)
        # For masked Language Models
        # the number of prediction is sometimes less than max_pred when sequence is short
        effective_length = len(tokens_b)
        if self.mask_source_words:
            effective_length += len(tokens_a)
        n_pred = min(self.max_pred, max(1, int(round(effective_length*self.mask_prob))))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        for i, tk in enumerate(tokens):
            # only mask tokens_b (target sequence)
            # we will mask [SEP] as an ending symbol
            if (i >= len(tokens_a)+2) and (tk != '[CLS]'):
                cand_pos.append(i)
            elif self.mask_source_words and (i < len(tokens_a)+2) and (tk != '[CLS]') and (not tk.startswith('[SEP')):
                cand_pos.append(i)
            else:
                special_pos.add(i)
        shuffle(cand_pos)
        masked_pos = set()
        max_cand_pos = max(cand_pos)
        for pos in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            if pos in masked_pos:
                continue
            def _expand_whole_word(st, end):
                # Local variant of the module-level helper, closing over
                # this call's `tokens`.
                new_st, new_end = st, end
                while (new_st >= 0) and tokens[new_st].startswith('##'):
                    new_st -= 1
                while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
                    new_end += 1
                return new_st, new_end
            if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                # ngram
                cur_skipgram_size = randint(2, self.skipgram_size)
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(
                        pos, pos + cur_skipgram_size)
                else:
                    st_pos, end_pos = pos, pos + cur_skipgram_size
            else:
                # directly mask
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(pos, pos + 1)
                else:
                    st_pos, end_pos = pos, pos + 1
            for mp in range(st_pos, end_pos):
                if (0 < mp <= max_cand_pos) and (mp not in special_pos):
                    masked_pos.add(mp)
                else:
                    break
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        masked_tokens = [tokens[pos] for pos in masked_pos]
        # Replacement: 80% [MASK]; of the remaining 20%, half (i.e. 10%
        # overall, via the second rand() draw) a random word, 10% unchanged.
        for pos in masked_pos:
            if rand() < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif rand() < 0.5:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
        # when n_pred < max_pred, we only calculate loss within n_pred
        masked_weights = [1]*len(masked_tokens)
        # Token Indexing
        masked_ids = self.indexer(masked_tokens)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Zero Padding
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([0]*n_pad)
        segment_ids.extend([0]*n_pad)
        # Attention mask: source block (first len(a)+2 columns) visible to
        # every position; target block is causal (lower-triangular).
        input_mask = torch.zeros(self.max_len, self.max_len, dtype=torch.long)
        input_mask[:, :len(tokens_a)+2].fill_(1)
        second_st, second_end = len(
            tokens_a)+2, len(tokens_a)+len(tokens_b)+3
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([0]*n_pad)
            if masked_pos is not None:
                masked_pos.extend([0]*n_pad)
            if masked_weights is not None:
                masked_weights.extend([0]*n_pad)
        return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, next_sentence_label)
class Preprocess4BiLM(Pipeline):
    """ Pre-processing steps for pretraining transformer.

    Bidirectional variant: full (all-ones) attention mask, segment ids 0/1,
    and a sentence-order objective — the pair is swapped with probability
    0.5 and ``next_sentence_label`` records whether the original order was
    kept (1.0) or swapped (0.0).
    """
    def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, mask_whole_word=False, mask_source_words=True, tokenizer=None):
        super().__init__()
        self.max_len = max_len
        self.max_pred = max_pred  # max tokens of prediction
        self.mask_prob = mask_prob  # masking probability
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_whole_word = mask_whole_word
        self.mask_source_words = mask_source_words
        self.tokenizer = tokenizer
    def __call__(self, instance):
        """Return (input_ids, segment_ids, input_mask, masked_ids, masked_pos,
        masked_weights, next_sentence_label) for one text pair."""
        tokens_a, tokens_b = instance[:2]
        # Sentence-order objective: keep (label 1.0) or swap (label 0.0).
        if rand() <= 0.5:
            next_sentence_label = 1.0
        else:
            tokens_a, tokens_b = tokens_b, tokens_a
            next_sentence_label = 0.0
        tokens_a = self.tokenizer.tokenize(tokens_a)
        tokens_b = self.tokenizer.tokenize(tokens_b)
        # -3 for special tokens [CLS], [SEP], [SEP]
        tokens_a, tokens_b = truncate_tokens_pair(tokens_a, tokens_b, self.max_len)
        # Add Special Tokens
        tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']
        segment_ids = [0]*(len(tokens_a)+2) + [1]*(len(tokens_b)+1)
        # For masked Language Models
        # the number of prediction is sometimes less than max_pred when sequence is short
        effective_length = len(tokens_b)
        if self.mask_source_words:
            effective_length += len(tokens_a)
        n_pred = min(self.max_pred, max(
            1, int(round(effective_length*self.mask_prob))))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        for i, tk in enumerate(tokens):
            # only mask tokens_b (target sequence)
            # we will mask [SEP] as an ending symbol
            if (i >= len(tokens_a)+2) and (tk != '[CLS]'):
                cand_pos.append(i)
            elif self.mask_source_words and (i < len(tokens_a)+2) and (tk != '[CLS]') and (not tk.startswith('[SEP')):
                cand_pos.append(i)
            else:
                special_pos.add(i)
        shuffle(cand_pos)
        masked_pos = set()
        max_cand_pos = max(cand_pos)
        for pos in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            if pos in masked_pos:
                continue
            def _expand_whole_word(st, end):
                # Local variant of the module-level helper, closing over
                # this call's `tokens`.
                new_st, new_end = st, end
                while (new_st >= 0) and tokens[new_st].startswith('##'):
                    new_st -= 1
                while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
                    new_end += 1
                return new_st, new_end
            if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                # ngram
                cur_skipgram_size = randint(2, self.skipgram_size)
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(
                        pos, pos + cur_skipgram_size)
                else:
                    st_pos, end_pos = pos, pos + cur_skipgram_size
            else:
                # directly mask
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(pos, pos + 1)
                else:
                    st_pos, end_pos = pos, pos + 1
            for mp in range(st_pos, end_pos):
                if (0 < mp <= max_cand_pos) and (mp not in special_pos):
                    masked_pos.add(mp)
                else:
                    break
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        masked_tokens = [tokens[pos] for pos in masked_pos]
        # Replacement: 80% [MASK], 10% (0.2*0.5 via second draw) random word,
        # 10% unchanged.
        for pos in masked_pos:
            if rand() < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif rand() < 0.5:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
        # when n_pred < max_pred, we only calculate loss within n_pred
        masked_weights = [1]*len(masked_tokens)
        # Token Indexing
        masked_ids = self.indexer(masked_tokens)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Zero Padding
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([0]*n_pad)
        segment_ids.extend([0]*n_pad)
        # Fully-visible (bidirectional) attention: every position attends to
        # every position.
        input_mask = torch.ones(self.max_len, self.max_len, dtype=torch.long)
        # input_mask[:, :len(tokens_a)+2].fill_(1)
        # second_st, second_end = len(
        #     tokens_a)+2, len(tokens_a)+len(tokens_b)+3
        # input_mask[second_st:second_end, second_st:second_end].copy_(
        #     self._tril_matrix[:second_end-second_st, :second_end-second_st])
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([0]*n_pad)
            if masked_pos is not None:
                masked_pos.extend([0]*n_pad)
            if masked_weights is not None:
                masked_weights.extend([0]*n_pad)
        return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, next_sentence_label)
class Preprocess4RightLM(Pipeline):
    """ Pre-processing steps for pretraining transformer.

    Left-to-right LM variant over a single sentence: segment id 2 and a
    causal (lower-triangular) attention mask over the sentence span.
    """
    def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, mask_whole_word=False, mask_source_words=True, tokenizer=None):
        super().__init__()
        self.max_len = max_len
        self.max_pred = max_pred  # max tokens of prediction
        self.mask_prob = mask_prob  # masking probability
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_whole_word = mask_whole_word
        self.mask_source_words = mask_source_words
        self.tokenizer = tokenizer
    def __call__(self, instance):
        """Return (input_ids, segment_ids, input_mask, masked_ids, masked_pos,
        masked_weights, next_sentence_label) for the first text of the pair;
        the second element of *instance* is ignored."""
        next_sentence_label = None
        tokens_a, _ = instance[:2]
        tokens_a = self.tokenizer.tokenize(tokens_a)
        tokens_a = truncate_tokens_signle(tokens_a, self.max_len)
        # Add Special Tokens
        tokens = ['[CLS]'] + tokens_a + ['[SEP]']
        segment_ids = [2]*(len(tokens_a)+2)
        # For masked Language Models
        # the number of prediction is sometimes less than max_pred when sequence is short
        effective_length = 0
        if self.mask_source_words:
            effective_length += len(tokens_a)
        n_pred = min(self.max_pred, max(
            1, int(round(effective_length*self.mask_prob))))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        for i, tk in enumerate(tokens):
            # only mask tokens_b (target sequence)
            # we will mask [SEP] as an ending symbol
            # if (i >= len(tokens_a)+2) and (tk != '[CLS]'):
            #     cand_pos.append(i)
            if (tk != '[CLS]') and (tk != '[SEP]'):
                cand_pos.append(i)
            else:
                special_pos.add(i)
        shuffle(cand_pos)
        masked_pos = set()
        # Guard against an empty candidate list (e.g. empty sentence).
        try:
            max_cand_pos = max(cand_pos)
        except:
            max_cand_pos = 0
        for pos in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            if pos in masked_pos:
                continue
            def _expand_whole_word(st, end):
                # Local variant of the module-level helper, closing over
                # this call's `tokens`.
                new_st, new_end = st, end
                while (new_st >= 0) and tokens[new_st].startswith('##'):
                    new_st -= 1
                while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
                    new_end += 1
                return new_st, new_end
            if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                # ngram
                cur_skipgram_size = randint(2, self.skipgram_size)
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(
                        pos, pos + cur_skipgram_size)
                else:
                    st_pos, end_pos = pos, pos + cur_skipgram_size
            else:
                # directly mask
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(pos, pos + 1)
                else:
                    st_pos, end_pos = pos, pos + 1
            for mp in range(st_pos, end_pos):
                if (0 < mp <= max_cand_pos) and (mp not in special_pos):
                    masked_pos.add(mp)
                else:
                    break
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        masked_tokens = [tokens[pos] for pos in masked_pos]
        # Replacement: 80% [MASK], 10% random word, 10% unchanged.
        for pos in masked_pos:
            if rand() < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif rand() < 0.5:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
        # when n_pred < max_pred, we only calculate loss within n_pred
        masked_weights = [1]*len(masked_tokens)
        # Token Indexing
        masked_ids = self.indexer(masked_tokens)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Zero Padding
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([0]*n_pad)
        segment_ids.extend([0]*n_pad)
        input_mask = torch.ones(self.max_len, self.max_len, dtype=torch.long)
        # input_mask[:, :len(tokens_a)+2].fill_(1)
        # Causal mask over the sentence span [0, len(a)+2).
        second_st, second_end = 0, len(tokens_a)+2
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([0]*n_pad)
            if masked_pos is not None:
                masked_pos.extend([0]*n_pad)
            if masked_weights is not None:
                masked_weights.extend([0]*n_pad)
        return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, next_sentence_label)
class Preprocess4LeftLM(Pipeline):
    """ Pre-processing steps for pretraining transformer.

    Right-to-left LM variant over a single sentence: segment id 3 and an
    UPPER-triangular attention mask (each position attends only to itself
    and later positions).  Note the attribute is named ``_tril_matrix`` but
    is built with ``torch.triu``.
    """
    def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, mask_whole_word=False, mask_source_words=True, tokenizer=None):
        super().__init__()
        self.max_len = max_len
        self.max_pred = max_pred  # max tokens of prediction
        self.mask_prob = mask_prob  # masking probability
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        # Upper-triangular despite the name: reversed (right-to-left) causality.
        self._tril_matrix = torch.triu(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_whole_word = mask_whole_word
        self.mask_source_words = mask_source_words
        self.tokenizer = tokenizer
    def __call__(self, instance):
        """Return (input_ids, segment_ids, input_mask, masked_ids, masked_pos,
        masked_weights, next_sentence_label) for the first text of the pair;
        the second element of *instance* is ignored."""
        next_sentence_label = None
        tokens_a, _ = instance[:2]
        tokens_a = self.tokenizer.tokenize(tokens_a)
        tokens_a = truncate_tokens_signle(tokens_a, self.max_len)
        # Add Special Tokens
        tokens = ['[CLS]'] + tokens_a + ['[SEP]']
        segment_ids = [3]*(len(tokens_a)+2)
        # For masked Language Models
        # the number of prediction is sometimes less than max_pred when sequence is short
        effective_length = 0
        if self.mask_source_words:
            effective_length += len(tokens_a)
        n_pred = min(self.max_pred, max(
            1, int(round(effective_length*self.mask_prob))))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        for i, tk in enumerate(tokens):
            # only mask tokens_b (target sequence)
            # we will mask [SEP] as an ending symbol
            # if (i >= len(tokens_a)+2) and (tk != '[CLS]'):
            #     cand_pos.append(i)
            if (tk != '[CLS]') and (tk != '[SEP]'):
                cand_pos.append(i)
            else:
                special_pos.add(i)
        shuffle(cand_pos)
        masked_pos = set()
        # Guard against an empty candidate list (e.g. empty sentence).
        try:
            max_cand_pos = max(cand_pos)
        except:
            max_cand_pos = 0
        for pos in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            if pos in masked_pos:
                continue
            def _expand_whole_word(st, end):
                # Local variant of the module-level helper, closing over
                # this call's `tokens`.
                new_st, new_end = st, end
                while (new_st >= 0) and tokens[new_st].startswith('##'):
                    new_st -= 1
                while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
                    new_end += 1
                return new_st, new_end
            if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                # ngram
                cur_skipgram_size = randint(2, self.skipgram_size)
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(
                        pos, pos + cur_skipgram_size)
                else:
                    st_pos, end_pos = pos, pos + cur_skipgram_size
            else:
                # directly mask
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(pos, pos + 1)
                else:
                    st_pos, end_pos = pos, pos + 1
            for mp in range(st_pos, end_pos):
                if (0 < mp <= max_cand_pos) and (mp not in special_pos):
                    masked_pos.add(mp)
                else:
                    break
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        masked_tokens = [tokens[pos] for pos in masked_pos]
        # Replacement: 80% [MASK], 10% random word, 10% unchanged.
        for pos in masked_pos:
            if rand() < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif rand() < 0.5:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
        # when n_pred < max_pred, we only calculate loss within n_pred
        masked_weights = [1]*len(masked_tokens)
        # Token Indexing
        masked_ids = self.indexer(masked_tokens)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Zero Padding
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([0]*n_pad)
        segment_ids.extend([0]*n_pad)
        input_mask = torch.ones(self.max_len, self.max_len, dtype=torch.long)
        # input_mask[:, :len(tokens_a)+2].fill_(1)
        # Reversed-causal (upper-triangular) mask over the span [0, len(a)+2).
        second_st, second_end = 0, len(tokens_a)+2
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([0]*n_pad)
            if masked_pos is not None:
                masked_pos.extend([0]*n_pad)
            if masked_weights is not None:
                masked_weights.extend([0]*n_pad)
        return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, next_sentence_label)
class Preprocess4Seq2seqDecode(Pipeline):
    """Build padded decoder inputs (ids, segments, positions, attention mask)
    for seq2seq decoding: the source is fully visible, the to-be-generated
    target region is causal."""

    def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128):
        super().__init__()
        self.max_len = max_len
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.max_tgt_length = max_tgt_length

    def __call__(self, instance):
        src_tokens, max_a_len = instance
        # Wrap the source with special tokens and right-pad it to the
        # batch-wide source length so all items in the batch line up.
        padded = ['[CLS]'] + src_tokens + ['[SEP]']
        assert len(padded) <= max_a_len + 2
        shortfall = max_a_len + 2 - len(padded)
        if shortfall > 0:
            padded = padded + ['[PAD]'] * shortfall
        assert len(padded) == max_a_len + 2
        total_len = min(self.max_tgt_length + max_a_len + 2, self.max_len)
        # Segment 4 marks the source block, segment 5 the target block.
        segment_ids = [4] * len(padded) + [5] * (total_len - len(padded))
        # Positions: contiguous over the real source tokens, 0 for the source
        # padding, then continuing from the real source length for the target.
        n_src = len(src_tokens) + 2
        position_ids = list(range(n_src))
        position_ids += [0] * (max_a_len + 2 - n_src)
        position_ids += [i - (max_a_len + 2) + n_src
                         for i in range(max_a_len + 2, total_len)]
        input_ids = self.indexer(padded)
        # Attention: real source columns visible everywhere; target region
        # lower-triangular (each step sees only earlier target positions).
        attn_mask = torch.zeros(total_len, total_len, dtype=torch.long)
        attn_mask[:, :n_src].fill_(1)
        tgt_st = len(padded)
        attn_mask[tgt_st:total_len, tgt_st:total_len].copy_(
            self._tril_matrix[:total_len - tgt_st, :total_len - tgt_st])
        return (input_ids, segment_ids, position_ids, attn_mask)
| 33,772
| 38.134415
| 175
|
py
|
Unilm
|
Unilm-master/tokenization_unilm.py
|
# coding=utf-8
"""Tokenization classes for UniLM."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'unilm-large-cased': "",
'unilm-base-cased': ""
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'unilm-large-cased': 512,
'unilm-base-cased': 512
}
class UnilmTokenizer(BertTokenizer):
    """Tokenizer for UniLM.

    Identical to BERT's WordPiece tokenization; only the bookkeeping
    tables for pretrained-model files and sizes are overridden.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
    """Trivial tokenizer that splits on whitespace only (no WordPiece)."""
    def tokenize(self, text):
        """Return the list of whitespace-separated tokens of ``text``."""
        return whitespace_tokenize(text)
| 868
| 22.486486
| 82
|
py
|
DoTra
|
DoTra-main/latAEModels.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Models for Cycle-GAN on encoded data
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def lin(c_in, c_out, bn=True, dr=0.0):
    """Build the module list for one fully-connected stage.

    Fixes the original's misleading comment (this builds a *linear*
    stage, not a convolutional one) and the non-numeric ``dr=False``
    default (``0.0`` behaves identically: neither compares > 0).

    Parameters
    ----------
    c_in, c_out : int
        Input / output feature widths of the linear layer.
    bn : bool
        If True, append BatchNorm1d and drop the linear bias (the
        batch-norm shift makes a bias redundant).
    dr : float
        Dropout probability; a Dropout layer is prepended only when > 0.

    Returns
    -------
    list of nn.Module in order [Dropout?], Linear, [BatchNorm1d?].
    """
    layers = []
    if dr > 0:
        layers.append(nn.Dropout(dr))
    layers.append(nn.Linear(c_in, c_out, bias=not bn))
    if bn:
        layers.append(nn.BatchNorm1d(c_out))
    return layers
class G(nn.Module):
    """Generator: a small MLP mapping one latent code onto another."""
    def __init__(self, cfg):
        super(G, self).__init__()
        self.laleak = cfg["laLeak"]
        # Layer widths: cfg["aez"] scaled by [1, *cfg["glay"], 1].
        widths = cfg["aez"] * np.array([1] + cfg["glay"] + [1])
        last = len(widths) - 1
        modules = []
        for j in range(last):
            # gben == 2 disables batch-norm on the final stage only.
            use_bn = cfg["gben"] and not (cfg["gben"] == 2 and j + 1 == last)
            modules.extend(lin(widths[j], widths[j + 1], use_bn, cfg["gdrop"][j]))
        self.lays = nn.Sequential(*modules)
    def forward(self, x):
        out = x
        # Leaky-ReLU after every layer except the last ...
        for layer in self.lays[:-1]:
            out = F.leaky_relu(layer(out))
        out = self.lays[-1](out)
        # ... which is only leaky-activated when cfg["laLeak"] is set.
        return F.leaky_relu(out) if self.laleak else out
class D(nn.Module):
    """Discriminator: an MLP over latent codes; 11 outputs when labels are used."""
    def __init__(self, cfg, use_labels=False):
        super(D, self).__init__()
        n_out = 11 if use_labels else 1
        widths = cfg["aez"] * np.array([1] + cfg["dlay"] + [1])
        widths[-1] = n_out
        last = len(widths) - 1
        modules = []
        for j in range(last):
            # dben == 2 disables batch-norm on the final stage only.
            use_bn = cfg["dben"] and not (cfg["dben"] == 2 and j + 1 == last)
            modules.extend(lin(widths[j], widths[j + 1], use_bn, cfg["ddrop"][j]))
        self.lays = nn.Sequential(*modules)
    def forward(self, x):
        out = x
        for layer in self.lays[:-1]:
            out = F.leaky_relu(layer(out))
        # Raw (unactivated) score / logits from the last layer.
        return self.lays[-1](out)
class LinCl(nn.Module):
    """Linear classifier head mapping a latent code to cfg["ds"][1] class logits."""
    def __init__(self, cfg):
        super(LinCl, self).__init__()
        widths = [cfg["aez"]] + list(cfg["aez"] * np.array(cfg["cllay"])) + [cfg["ds"][1]]
        last = len(widths) - 1
        modules = []
        for j in range(last):
            # clben == 2 disables batch-norm on the final stage only.
            use_bn = cfg["clben"] and not (cfg["clben"] == 2 and j + 1 == last)
            modules.extend(lin(widths[j], widths[j + 1], use_bn, cfg["cldrop"][j]))
        self.lays = nn.Sequential(*modules)
    def forward(self, x):
        out = x
        for layer in self.lays[:-1]:
            out = F.leaky_relu(layer(out))
        # Final layer returns raw logits.
        return self.lays[-1](out)
| 2,462
| 31.84
| 198
|
py
|
DoTra
|
DoTra-main/main.py
|
# Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
# Licence: Use it however you like, but cite the paper :-)
#Main routine to train models
import sklearn
import torch
from torch.utils.data import Dataset, TensorDataset
import torch.cuda.amp as tca
from optCycEncoded import Solver
import dutils
import AEModels as aut
import trainClassifiers,imgutil
from imgutil import *
def trainOne(cfg):
    """Run one full DoTra experiment described by ``cfg``.

    Pipeline as implemented below: load raw data, train a reference
    classifier, autoencode the source data and a domain-adapted copy,
    train the cycle-GAN transformer between the two latent spaces
    (``Solver``), learn an image-space transformer, roll it forward over
    the unseen domains in ``cfg["transEv"]``, and evaluate classifiers
    trained on the generated data.  Results are stored in
    ``cfg["result"]`` and printed.  Requires CUDA.
    """
    def cds(X, Y, shuffle=False, norm=True):
        # Wrap numpy arrays into a DataLoader; optionally normalise first.
        noX = imgutil.nor(X.astype(np.float32)) if norm else X
        ds = TensorDataset(torch.from_numpy(noX), torch.from_numpy(Y))
        return torch.utils.data.DataLoader(ds, batch_size=cfg["batchSize"], shuffle=shuffle, num_workers=0)
    def cds2(X, Y, shuffle=False):
        # Like cds but for a *list* of input arrays (paired datasets).
        noX = [imgutil.nor(cX.astype(np.float32)) for cX in X]
        ds = TensorDataset(*[torch.from_numpy(cX) for cX in noX], torch.from_numpy(Y))
        return torch.utils.data.DataLoader(ds, batch_size=cfg["batchSize"], shuffle=shuffle, num_workers=0)
    def modXY(toModteX, modteY):
        # Apply the first configured domain transform to the images.
        t = cfg["trans"][0]
        modteX = applyOp(toModteX, t[0], t[1])
        return modteX, modteY
    def cdsMod(X, Y):
        # Build the chain of evaluation domains by repeatedly transforming.
        X = imgutil.nor(X)
        allX, allY = [X], [Y]
        for t in cfg["transEv"]:
            modtrX, Y = modXY(allX[-1].astype(np.float32), allY[-1])
            allX.append(modtrX)
            allY.append(Y)
        return allX, allY
    def getAE(cfg, trX, trY, teX, teY, sname, dotrain, picname=""):
        # Train/load an autoencoder and return loaders over the *encoded* data.
        origtr_iter = cds(trX, trY, True)
        origte_iter = cds(teX, teY, False)
        (aetrX, aetrY), (aeteX, aeteY), acfg, netAEorg = aut.runAE(cfg, origtr_iter, origte_iter, sname, dotrain, picname)
        aetr_iter = cds(aetrX, aetrY, True, False)
        aete_iter = cds(aeteX, aeteY, False, False)
        return aetr_iter, aete_iter, netAEorg
    cget = True
    #Get unmodified, raw training data and a base Classifier
    totdat = int(cfg["ds"][2])
    (trX, trY), (teX, teY) = dutils.getFullDS(cfg, totdat, None, cget)
    if cfg["baseZoom"] != 1:
        print("Zooming data for Proposition test")
        trX = applyOp(trX, "zoo1", cfg["baseZoom"])
        teX = applyOp(teX, "zoo1", cfg["baseZoom"])
    # Optionally keep source and domain-adapted training data disjoint.
    disoff = int(totdat * cfg["distinctDat"]*0.5)
    nd = totdat-disoff
    baseCl, baseClRes = trainClassifiers.getclassifier(cfg, cds(trX, trY, True), cds(teX, teY, False), None, getc=False, loadCl=False, save=False, useLat=False) #get base classifier (only used for reference)
    toModtrX, modtrY = np.copy(trX[disoff:nd + disoff]), np.copy(trY[disoff:nd + disoff])
    toModteX, modteY = np.copy(teX), np.copy(teY)
    trX, trY = trX[:nd], trY[:nd]
    #Get auto encoding of raw training data and domain adapted data
    aetr_iter, aete_iter, netAEorg = getAE(cfg, trX, trY, teX, teY, sname=None, dotrain=cget, picname="onRawOrgDomain") #AE with tanh => -1,1
    t = cfg["trans"][0]
    modtrX, modtrY = modXY(toModtrX, modtrY)
    modteX, modteY = modXY(toModteX, modteY)
    failed = False
    # Full DoTra: autoencode, transform on latent, learn between domains
    aemodtr_iter, aemodte_iter, netAEdom = getAE(cfg, modtrX, modtrY, modteX, modteY, sname=None, dotrain=cget, picname="onRawAdaDomain")
    #Get Transformer between auto encodings of raw training data and for the same data in domain-adapted space
    solver = Solver(cfg) #if cfg["solvSim"]==0 else solverSimp.Solver(cfg)
    othlo, ax0, ax1, ay, atex0, atex1, atey = solver.train(netAEorg, netAEdom, aetr_iter, aete_iter, aemodtr_iter, modteX, modteY, cget, nd) #AE with tanh => -1,1
    # Get transformer from original space (not some encoding space) and domain space (not encoded)
    a2tr_iter = cds2([ax0, ax1], ay, shuffle=True) # Data where training data in orig space is mapped to data in domain space
    a2te_iter = cds2([atex0, atex1], atey) # modtrX, amodteX = cdsMod(trX), cdsMod(teX) #eteX, eteY = amodteX[-nFor:], [teY] * nFor # if cfg["doTra"]: #ntr=ntra+1 - nFor #origtr_iter = cds2(amodtrX,np.copy(trY), True) # Array is copied due to type cast to float32 in cds2 #origte_iter = cds2(amodteX, np.copy(teY), False)
    loadtrans = cget
    orgTransModel, cyccfg, loaded = trainClassifiers.getTrans(cfg, a2tr_iter, a2te_iter, ((ax0, ay), (atex0, atey)), None, loadtrans, selfTra=False)
    #Get labeled domain data, by applying transformer multiple times on source data
    nFor = len(cfg["transEv"])# nFor = ntra# if nFor <= 0: print("nothing to forecast", ntra, cfg["transEv"]) return
    atrX = [ax0]
    atrY = [ay]
    # Each iteration feeds the previous domain's data through the learned
    # transformer to synthesise the next (unseen) domain.
    for i in range(nFor):
        cajx, cajy = [], []
        orgTransModel.eval()
        cdataset = cds(atrX[-1], atrY[-1], shuffle=False, norm=cfg["evNorm"]) # citer=cds2([atrX[-2],atrX[-1]],atrY[-1])
        for data in cdataset:
            with tca.autocast():
                dsx = data[0].cuda()
                # Non-direct-cycle models expect a (x1, x2) pair input.
                if not cfg["dirCyc"]: dsx = [None, dsx]
                output = orgTransModel(dsx).detach().cpu()
                cajx.append(output.clone().numpy())
                cajy.append(data[-1].clone().numpy())
        atrX.append(np.concatenate(cajx, axis=0))
        atrY.append(np.concatenate(cajy, axis=0))
    etrX, etrY = atrX[-nFor:], atrY[-nFor:] #print("nfo", nFor, len(atrX)) # imgutil.makeAndStore2(atrX[-3][:64],atrX[-2][:64],atrX[-1][:64], cfg["bFolder"] + "samples/", "FORCAST"+str(cfg["bid"]) + fs(cfg) + ".png")
    if not failed:
        #Get domain datasets used for prediction
        amodteX, amodteY = cdsMod(teX, teY)
        eteX, eteY = amodteX[-nFor:], amodteY[-nFor:]
        def evalCl(ltrX, ltrY, lteX, lteY, domid):
            # Train a fresh classifier on generated domain data; return its metrics.
            def cods(lX, lY, shuffle=False):
                trX, trY = np.concatenate(lX, axis=0), np.concatenate(lY, axis=0) # trY=np.concatenate([np.ones(aj[0].shape[0],dtype=np.int)*j for j in range(len(aj))])
                trX, trY = sklearn.utils.shuffle(trX, trY)
                trit = cds(trX, trY, shuffle)
                return trit
            trit = cods(ltrX, ltrY, True)
            teit = cods(lteX, lteY, False)
            netCl, clcfg = trainClassifiers.getclassifier(cfg, trit, teit, None, getc=False, save=False, loadCl=False)
            return clcfg
        #Train classifier using labeled data being transformed and apply it to generated test data
        vals = np.arange(nFor)
        for j in reversed(vals):
            clcfg = evalCl([etrX[j]], [etrY[j]], [eteX[j]], [eteY[j]], j)
            #print("eval",j,nFor,np.sum(etrX[j]),clcfg,cyccfg)
            # Per-domain train/test/max-test accuracies, indexed by domain j.
            cyccfg["ptrA" + str(j)] = clcfg["trA"]
            cyccfg["pteA" + str(j)] = clcfg["teA"]
            cyccfg["mteA" + str(j)] = clcfg["mteA"]
        cyccfg = {**cyccfg, **othlo, **baseClRes}
        cfg["result"] = [cyccfg]
    print("\n\nBench:", cfg["trans"])
    print("Result")
    res = cfg["result"][0]
    print("All metrics", res)
    print("Accs (Source, target 0,..,2)", np.round([res["teA"]]+[res["pteA"+str(i)] for i in range(3)], 3))
    print("MaxAccs", np.round([res["teA"]] + [res["mteA" + str(i)] for i in range(3)], 3))
    print("\n\n\n\n")
    #print("\n\n\n\nFOR Accuracy: check pteA. (=Accuracy after last epoch) and mteA.(=max Accuracy accross all epochs) in results below")
    #print("pteA0 denotes test accuracy on target domain 0, pteA1 target domain 1, pteA2 target domain 2, etc.")
    #print("teA denotes test accuracy on source domain")
| 7,497
| 48.006536
| 366
|
py
|
DoTra
|
DoTra-main/runExp.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
print("Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra")
print("Note: You should do at least 12 runs and only take those accuracies, and report the quartile, i.e., the 4th best result, due to instability of Cycle GAN (as described in paper)")
from main import trainOne
#Get DTtra for rotation, zoom and splitting for MNIST
# dummy=True shrinks the dataset and epoch counts to smoke-test the pipeline.
dummy = True #For checking if runs
dummy = False
# Shared configuration for all three benchmarks (data, autoencoder,
# classifier, cycle-GAN and evaluation hyper-parameters).
baseCfg={'imSi':32,'benchID': 0, 'netSi': 1.0, 'ds': ('MNIST', 10, 60000 if not dummy else 2000),'distinctDat': 1, 'datCut': 1, 'batchSize': 64,'dummy': dummy,
         'addMin': 1, 'baseZoom': 1, 'aez': 32, 'aeType': 'N', 'aebn': 1, 'aebeta': 1,
         'aecfg': {'id': 0, 'opt': ('A', 0.001, 1e-05), 'netT': 'V6'}, 'aeGAN': (0, {'lr': 0.0001}), 'aeganbn': 1, 'sepAE': 1,'clcfg': {'id': 0, 'opt': ('S', 0.1, 0.0005), 'netT': 'V6'}, 'addM': 1,'ttry': (3, 0.3), 'resB': 0,
         'convdim': 64, 'nExLay': 0, 'tleak': 0.005, 'epT': 96 if not dummy else 3, 'trcfg': {'id': 0, 'opt': ('A', 0.001, 1e-05), 'netT': 'V6'}, 'epA': 72 if not dummy else 3, 'ker': 3, 'twoLin': 0, 'drop': 0, 'bn': 1, 'epC': 36 if not dummy else 3, 'evNorm': 1, 'genAE': 0, 'singleIn': 1,'traLo': 2, 'sym': 0,
         'selfAE': 0,
         'dlay': [128], 'glay': [32], 'laLeak': 1, 'gben': 2, 'dben': 2, 'gdrop': (0, 0.2, 0), 'ddrop': (0, 0.2, 0), 'useLab': 0, 'useRec': 3, 'recLo': 1, 'cycFac': [1, 1], 'solvSim': 0, 'd1': 1, 'labSmo': 0.1, 'DGlr': [0.0001, 0.0001], 'DGBeta12': ((0.5, 0.999), (0.5, 0.999)), 'lrdecay': 0.66, 'ntries': (200, 3), 'epG': 96 if not dummy else 4, 'useLat': 0, 'llay': [8, 8], 'ldrop': (0, 0.1, 0, 0), 'lben': 1, 'mlay': [4, 4], 'mdrop': (0, 0, 0), 'mben': 0, 'mLR': 0.001, 'epM': 144 if not dummy else 3, 'mlrdecay': 0.8,
         'mBatchSize': 512, 'mntries': (200, 3), 'useClLo': 0, 'trainCl': 0, 'trainOrg': 0, 'smoo': 0, 'useDiff': 0, 'cllay': [4, 4], 'clben': 2, 'cldrop': [0, 0, 0.1], 'epLin': 24, 'filterCl': 0, 'dirCyc': 0, 'accStop': (0, 0.5),
         'bpart': '', 'disDat': 1, 'dt':'None'
         }
# Three benchmarks: middle-split, zoom, and rotation drift; 'trans' is the
# training drift step, 'transEv' the chain of future domains to forecast.
cfg1={**baseCfg,**{'trans': [('splitMid1', 6)], 'transEv': [('splitMid1', 6), ('splitMid1', 12), ('splitMid1', 18)], 'pr': {'dt':'None'}}}
cfg2={**baseCfg,**{'trans': [('zoo1', 1.33)], 'transEv': [('zoo1', 1.33), ('zoo1', 1.769), ('zoo1', 2.353)],'pr': {'dt':'None'} }}
cfg3={**baseCfg,**{'trans': [('rot1', 45)], 'transEv': [('rot1', 45), ('rot1', 90), ('rot1', 135)],'pr': {'dt':'None'}}}
cfgs=[cfg1,cfg2,cfg3]
# 12 repetitions per benchmark (see note above about reporting quartiles).
for i in range(12):
    for c in cfgs:
        print("\n\nRUN ", i, " Bench", c["trans"])
        c["dummy"]=dummy
        trainOne(c)
| 2,956
| 88.606061
| 522
|
py
|
DoTra
|
DoTra-main/optCycEncoded.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Based on https://github.com/yunjey/mnist-svhn-transfer/
import torch
import torch.nn as nn
import os
import pickle
import numpy as np
from torch.autograd import Variable
from torch import optim
import torch.cuda.amp as tca
from trainClassifiers import getAcc
from latAEModels import G, D
from trainClassifiers import decay
from classifierModels import worNet
def getTrAcc( cfg, trds,val_dataset):
    """Train a fresh worNet classifier on ``trds`` and return its accuracy.

    Trains for cfg["epC"]//3 epochs with SGD + mixed precision (AMP) and
    returns ``(accuracy on val_dataset, trained network)``.  Requires CUDA.
    """
    netCl = worNet(cfg).cuda()
    ccf = cfg["clcfg"]
    closs, teaccs, trep, loss, clr = 0, [], cfg["epC"]//3, nn.CrossEntropyLoss(), ccf["opt"][1]
    # optimizerCl = optim.SGD(netCl.parameters(), lr=0.05, momentum=0.9, weight_decay=1e-5) # elif ccf["opt"][0] == "A": optimizerCl = optim.Adam(netCl.parameters(), ccf["opt"][2], weight_decay=ccf["opt"][3])
    # Warm-up: start at 1/10th of the configured LR (see decay()).
    warmup = (max(2, trep // 40), 10)
    optimizerCl = optim.SGD(netCl.parameters(), lr=ccf["opt"][1] / warmup[1], momentum=0.9, weight_decay=ccf["opt"][2])
    scaler = tca.GradScaler()
    clAcc = lambda dataset: getAcc(netCl, dataset, niter=9999, cfg=cfg)
    crolo = nn.CrossEntropyLoss()
    closs = 0
    for epoch in range(trep):
        netCl.train()
        for i, data in enumerate(trds):
            with tca.autocast():
                optimizerCl.zero_grad(set_to_none=True)
                dsx, dsy = data[0].cuda(), data[1].cuda()
                output = netCl(dsx)
                errD_real = crolo(output, dsy.long())
                scaler.scale(errD_real).backward()
                scaler.step(optimizerCl)
                scaler.update()
                # Exponentially smoothed running loss (heavier smoothing
                # once past the first 20 batches).
                closs = 0.97 * closs + 0.03 * errD_real.item() if i > 20 else 0.8 * closs + 0.2 * errD_real.item()
        decay(ccf, epoch, optimizerCl, warmup, trep)
        netCl.eval()
        if epoch<2 or epoch==trep-1 or epoch%15==0:
            print("Train Test CL","ep", epoch, closs, clAcc(val_dataset))
    return clAcc(val_dataset),netCl
class Solver(object):
    """Cycle-GAN trainer operating on autoencoder latent codes.

    ``g12`` maps original-domain codes to adapted-domain codes and ``g21``
    the reverse; ``d1``/``d2`` are the per-domain discriminators (plus an
    optional class-conditional ``d1cl``).  ``train`` runs the adversarial
    loop and returns the decoded (or latent, when cfg["useLat"]) data
    pairs used by the downstream image-space transformer.  Requires CUDA.
    """
    def __init__(self, cfg):
        self.cfg = cfg
        self.g12, self.g21 = None, None
        self.d1, self.d2 = None, None
        self.g_optimizer, self.d_optimizer = None, None
        self.num_classes = cfg["ds"][1]
        self.batch_size = cfg["batchSize"]
    def build_model(self):  # """Builds a generator and a discriminator."""
        self.g12 = G(self.cfg).cuda()
        self.g21 = G(self.cfg).cuda()
        if self.cfg["d1"]: self.d1 = D(self.cfg, use_labels=self.cfg["useLab"] in [1, 2]).cuda()
        self.d2 = D(self.cfg, use_labels=self.cfg["useLab"] == 2).cuda()
        if self.cfg["useLab"] == 3: self.d1cl = D(self.cfg, use_labels=1).cuda()
        # Both generators share one optimizer; all discriminators share another.
        g_params = list(self.g12.parameters()) + list(self.g21.parameters())
        d_params = list(self.d2.parameters())
        if self.cfg["d1"]: d_params += list(self.d1.parameters())
        if self.cfg["useLab"] == 3: d_params += list(self.d1cl.parameters())
        self.d_optimizer = optim.Adam(d_params, self.cfg["DGlr"][0], self.cfg["DGBeta12"][0])
        self.g_optimizer = optim.Adam(g_params, self.cfg["DGlr"][1], self.cfg["DGBeta12"][1])
    def to_var(self, x): return Variable(x.cuda())  # """Converts numpy to variable."""
    #def to_data(self, x): return x.cpu().data.numpy() #"""Converts variable to numpy."""
    def reset_grad(self):
        # Clear gradients of both optimizers before each sub-step.
        self.g_optimizer.zero_grad(set_to_none=True)
        self.d_optimizer.zero_grad(set_to_none=True)
        #if self.cfg["useLab"] == 3: self.d3_optimizer.zero_grad()
    def decay(self, epoch, total):
        # Multiplicative LR decay once past the cfg["lrdecay"] fraction of training.
        if epoch > total*self.cfg["lrdecay"]:
            for opt in [self.g_optimizer, self.d_optimizer]:
                for g in opt.param_groups:
                    g['lr'] = 0.85*g['lr']
    def train(self, netAEorg, netAEdom, aetr_iter, aete_iter, aemodtr_iter, modteX, modteY, cget, nd): #drift_iter = iter(self.adaDom_loader) #orgDom_iter = iter(self.orgDom_loader)
        """Adversarial loop; returns (othlo, ax0, ax1, ay, atex0, atex1, atey)."""
        othlo, milo, bacc = -1, 1e99, 0
        reclo, reclo2 = torch.zeros(1), torch.zeros(1)
        print("Train Cyc GAN")
        if self.cfg["useClLo"]:
            # Optional auxiliary classifier loss on reconstructed codes.
            from trainClassifiers import getLinCl
            clloss = nn.CrossEntropyLoss()
            if self.cfg["trainCl"]:
                ccf = self.cfg["clcfg"]
                from latAEModels import LinCl
                clDom = LinCl(self.cfg).cuda()
                loOrg, loDom = nn.CrossEntropyLoss(), nn.CrossEntropyLoss()
                clr = ccf["opt"][1] / 100
                if self.cfg["trainCl"] != 3:
                    clOrg = LinCl(self.cfg).cuda()
                    optOrg = optim.SGD(clOrg.parameters(), lr=clr, momentum=0.8, weight_decay=ccf["opt"][2] / 5) #clr / warmup[
                else:
                    linCl, lincfg = getLinCl(self.cfg, aetr_iter, aete_iter, "None", cget, save=True, loadCl=True)
                optDom = optim.SGD(clDom.parameters(), lr=clr, momentum=0.8, weight_decay=ccf["opt"][2] / 5)
            else:
                linCl, lincfg = getLinCl(self.cfg, aetr_iter, aete_iter, "None", cget, save=True, loadCl=True)
        self.build_model()
        useLabLoss = nn.CrossEntropyLoss() # loss if use_labels = True
        ax, ay = [], []
        tries = 0
        niterEp = nd // self.cfg["batchSize"]
        train_iters = self.cfg["epG"] * niterEp + self.cfg["ntries"][0] * self.cfg["ntries"][1] # config.train_iters
        # One-sided label smoothing: random perturbation scaled by cfg["labSmo"].
        labSmo = lambda sh: 2*(torch.rand(sh.shape[0]).cuda()-0.5)*self.cfg["labSmo"] if self.cfg["labSmo"]>0 else 0
        # Cycle-reconstruction penalty: L1 or squared, optionally on per-sample means.
        recF = torch.square if self.cfg["recLo"] %10 == 2 else torch.abs
        recLoFct = lambda x: recF(torch.mean(torch.abs(x), dim=1)) if self.cfg["recLo"] >= 10 else recF(x)
        miter, siter = iter(aetr_iter), iter(aemodtr_iter)
        sca0, sca1, sca2, sca3 = tca.GradScaler(), tca.GradScaler(), tca.GradScaler(), tca.GradScaler()
        def wrap(scaler, opt, lo):
            # One mixed-precision optimizer step for loss `lo`.
            scaler.scale(lo).backward()
            scaler.step(opt)
            scaler.update()
        for step in range(train_iters + 1): # # reset data_iter for each epoch
            with tca.autocast():
                try:
                    adaDom, s_labels = next(siter) ## load adaDom and orgDom dataset
                    orgDom, m_labels = next(miter)
                except StopIteration:
                    # Restart both loaders when either is exhausted.
                    miter, siter = iter(aetr_iter), iter(aemodtr_iter)
                    adaDom, s_labels = next(siter)
                    orgDom, m_labels = next(miter)
                self.decay(step//niterEp, self.cfg["epG"])
                orgDom, adaDom = orgDom.float(), adaDom.float()
                if step == 0: code_org, code_dom = orgDom.clone().cuda(), adaDom.clone().cuda() #save for outputting images
                adaDom, s_labels = self.to_var(adaDom), self.to_var(s_labels).long().squeeze()
                orgDom, m_labels = self.to_var(orgDom), self.to_var(m_labels.long())
                # Index num_classes is used as the extra "fake" class.
                if self.cfg["useLab"]: orgDom_fake_labels = self.to_var(torch.Tensor([self.num_classes] * adaDom.size(0)).long())
                if self.cfg["useLab"] == 2: adaDom_fake_labels = self.to_var(torch.Tensor([self.num_classes] * orgDom.size(0)).long())
                # ============ train D ============#
                # train with real images
                self.reset_grad()
                d1_loss = 0
                if self.cfg["d1"]:
                    out = self.d1(orgDom)
                    if self.cfg["useLab"] == 1 or self.cfg["useLab"] == 2: d1_loss = useLabLoss(out, m_labels)
                    else: d1_loss = torch.mean((out - 1+labSmo(out)) ** 2)
                out = self.d2(adaDom)
                if self.cfg["useLab"] == 2: d2_loss = useLabLoss(out, s_labels)
                else: d2_loss = torch.mean((out - 1+labSmo(out)) ** 2)
                d_orgDom_loss, d_adaDom_loss, d_real_loss = d1_loss, d2_loss, d1_loss + d2_loss
                if self.cfg["useLab"] == 3:
                    out = self.d1cl(orgDom)
                    d_real_loss += useLabLoss(out, m_labels)
                wrap(sca0, self.d_optimizer, d_real_loss)
                # train with fake images
                self.reset_grad()
                fake_adaDom = self.g12(orgDom)
                out = self.d2(fake_adaDom)
                if self.cfg["useLab"] == 2: d2_loss = useLabLoss(out, adaDom_fake_labels)
                else: d2_loss = torch.mean((out+labSmo(out)) ** 2)
                fake_orgDom = self.g21(adaDom)
                if self.cfg["d1"]:
                    out = self.d1(fake_orgDom)
                    if self.cfg["useLab"] == 1 or self.cfg["useLab"] == 2: d1_loss = useLabLoss(out, orgDom_fake_labels)
                    else: d1_loss = torch.mean((out+labSmo(out)) ** 2)
                else: d1_loss = 0
                d_fake_loss = d1_loss + d2_loss
                if self.cfg["useLab"] == 3:
                    out = self.d1cl(fake_orgDom)
                    d_fake_loss += useLabLoss(out, orgDom_fake_labels)
                # d_fake_loss.backward()
                # self.d_optimizer.step()
                wrap(sca1, self.d_optimizer, d_fake_loss)
                # ============ train G ============#
                # train orgDom-adaDom-orgDom cycle
                self.reset_grad()
                fake_adaDom = self.g12(orgDom)
                out = self.d2(fake_adaDom)
                if self.cfg["useLab"] == 2: g_loss = useLabLoss(out, m_labels)
                else: g_loss = torch.mean((out - 1) ** 2)
                if self.cfg["useRec"] > 0:
                    reconst_orgDom = self.g21(fake_adaDom)
                    reclo = self.cfg["cycFac"][0]*self.cfg["useRec"] * torch.mean(recLoFct(orgDom - reconst_orgDom))
                    g_loss += reclo
                if self.cfg["useLab"] == 3:
                    out = self.d1cl(reconst_orgDom)
                    g_loss += useLabLoss(out, m_labels)
                if self.cfg["useClLo"]:
                    if self.cfg["trainCl"]:
                        actOrg = clOrg(reconst_orgDom) # subtract loss on original maybe
                        actDom = clDom(fake_adaDom) # subtract loss on original maybe
                        if self.cfg["smoo"]:
                            actOrg = actOrg+torch.mean(torch.abs(actOrg.detach()), dim=0)*self.cfg["smoo"]
                            actDom = actDom + torch.mean(torch.abs(actDom.detach()), dim=0) * self.cfg["smoo"]
                        li_loss = self.cfg["trainCl"] * (clloss(actOrg, m_labels) + clloss(actDom, m_labels))
                    else:
                        acti = linCl(reconst_orgDom) # subtract loss on original maybe
                        if self.cfg["smoo"]:
                            acti = acti+torch.mean(torch.abs(acti.detach()), dim=0)*self.cfg["smoo"]
                        li_loss = self.cfg["useClLo"]*clloss(acti, m_labels)
                    g_loss += li_loss
                wrap(sca2, self.g_optimizer, g_loss)
                # train adaDom-orgDom-adaDom cycle
                self.reset_grad()
                fake_orgDom = self.g21(adaDom)
                if self.cfg["d1"]:
                    out = self.d1(fake_orgDom)
                    if self.cfg["useLab"] == 2: g_loss = useLabLoss(out, s_labels)
                    else: g_loss = torch.mean((out - 1) ** 2)
                else: g_loss = 0
                if self.cfg["useRec"] > 0:
                    reconst_adaDom = self.g12(fake_orgDom)
                    reclo2 = self.cfg["cycFac"][1] * self.cfg["useRec"] * torch.mean(recLoFct(adaDom - reconst_adaDom))
                    g_loss += reclo2
                wrap(sca3, self.g_optimizer, g_loss)
                if self.cfg["useClLo"]:
                    if self.cfg["trainCl"]:
                        def trCl(cl, dat, lo, opt):
                            # One plain SGD step for an auxiliary classifier.
                            opt.zero_grad(set_to_none=True)
                            cl.train()
                            act = cl(dat)
                            clo = lo(act, m_labels)
                            clo.backward()
                            opt.step()
                            cl.eval()
                        if not self.cfg["trainOrg"] == 2: trCl(clOrg, reconst_orgDom.detach(), loOrg, optOrg)
                        if self.cfg["trainOrg"] == 1: trCl(clOrg, orgDom.detach(), loOrg, optOrg)
                        trCl(clDom, fake_adaDom.detach(), loDom, optDom)
                if (step + 1) % self.cfg["ntries"][0] == 0: # print the log info self.log_step
                    useLat = self.cfg["useLat"]
                    def getaeds(ds):
                        # Map a latent dataset through g12 and decode both sides
                        # (kept in latent space when cfg["useLat"]).
                        ax0, ax1, ay = [], [], []
                        self.g12.eval()
                        for bx, by in ds:
                            with tca.autocast():
                                cx = bx.float().cuda()
                                fake_code_dom = self.g12(cx)
                                if not useLat:
                                    orgX = netAEorg.dec(cx).detach().cpu().numpy()
                                    domGenX = netAEdom.dec(fake_code_dom).detach().cpu().numpy()
                                ax0.append(orgX if not useLat else bx)
                                ax1.append(domGenX if not useLat else fake_code_dom.detach().cpu().numpy())
                                ay.append(by.detach().cpu().numpy())
                        self.g12.train()
                        return [np.concatenate(cx, axis=0) for cx in [ax0, ax1, ay]]
                    if (step + 1) % (10*self.cfg["ntries"][0]) == 0:
                        print('Step [%d/%d], Losses: d_real: %.4f, d_OrgDom: %.4f, d_AdaDom: %.4f, '
                              'd_fake: %.4f, g: %.4f, r: %.4f, r2: %.4f' % (step + 1, train_iters, d_real_loss.item(), d_orgDom_loss.item() if self.cfg["d1"] else -1, d_adaDom_loss.item(), d_fake_loss.item(), g_loss.item(), reclo.item(), reclo2.item()), self.cfg["pr"])
                        if self.cfg["useClLo"]: print("LinCl Loss", li_loss.item())
                    clo = g_loss.item()
                    # Snapshot outputs whenever the generator loss improves
                    # markedly after the scheduled epochs; stop after
                    # cfg["ntries"][1] snapshots.
                    if (step // niterEp >= self.cfg["epG"] and milo * 0.85 > clo):
                        milo = clo
                        tries += 1
                        [ax0, ax1, ay] = getaeds(aetr_iter)
                        [atex0, atex1, atey] = getaeds(aete_iter)
                        if tries == self.cfg["ntries"][1]: break
        othlo = {"DOLo": d_orgDom_loss.item() if self.cfg["d1"] else 0, "DDLo": d_adaDom_loss.item(), "DFLo": d_fake_loss.item()} # "DRLo":d_real_loss.item(),
        return othlo, ax0, ax1, ay, atex0, atex1, atey
| 14,096
| 47.947917
| 264
|
py
|
DoTra
|
DoTra-main/imgutil.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
import numpy as np
from scipy import ndimage
def nor(x): return (x - np.mean(x, axis=(0, 2, 3), keepdims=True)) / (np.std(x, axis=(0, 2, 3), keepdims=True) + 1e-7)
def rot1(img, d):
    """Rotate a 2-D image by ``d`` degrees in place (no reshape).

    Corners exposed by the rotation are filled with the smaller of the
    two corner pixel values, taken as a proxy for the background colour.
    """
    fill = min(img[0, 0], img[-1, -1])
    return ndimage.rotate(img, d, reshape=False, cval=fill)
def zoo1(img, d):
    """Zoom into the centre of a square image by factor ``d`` (d >= 1).

    The image is rescaled by ``d`` and the central crop at the original
    side length is returned, so the output shape equals the input shape.
    Generalized from the original hard-coded 32x32 to any square side
    length (identical results for 32x32 inputs).
    """
    w = img.shape[0]
    bo = int(w * d - w) // 2
    return ndimage.zoom(img, d)[bo:w + bo, bo:w + bo]
def splitMid1(x, d):
    """Insert a ``d``-row background band into the middle of image ``x``.

    Rows d//2 .. w//2 form the top part, a constant band of the corner
    background value is inserted, then the bottom half follows; the
    result is truncated back to the original height ``w``.
    """
    w = x.shape[0]
    background = min(x[0, 0], x[-1, -1])
    top = x[d // 2:w // 2, :]
    band = np.full_like(x[:d], background)
    bottom = x[w // 2:, :]
    return np.concatenate([top, band, bottom], axis=0)[:w]
def getOp(name):
    """Resolve a transform function (e.g. "rot1", "zoo1", "splitMid1") by
    name from this module's globals; raises KeyError for unknown names."""
    return globals()[name]
def applyOp(x,opname,para):
    """Apply the named per-plane transform to every plane of batch ``x``.

    ``x`` is indexed as ``x[i, j]`` (sample, channel) and each 2-D plane
    is transformed in place by ``getOp(opname)(plane, para)``; the
    result is channel-normalised via ``nor`` before being returned.
    """
    x=x.astype(np.float32)
    op=getOp(opname)
    for i in range(x.shape[0]):
        for j in range(x.shape[1]): # xo=np.copy(x[i,j])
            x[i,j]=op(x[i,j],para)
    return nor(x)
| 1,282
| 26.891304
| 198
|
py
|
DoTra
|
DoTra-main/doTraModel.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
import torch.nn as nn
import torch.nn.functional as F
import torch
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Upsampling block: ConvTranspose2d optionally followed by BatchNorm2d.

    When ``bn`` is set, the transposed conv's bias is dropped (redundant
    with the batch-norm shift).
    """
    block = [nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=not bn)]
    if bn:
        block.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*block)
def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Downsampling block: Conv2d optionally followed by BatchNorm2d.

    When ``bn`` is set, the conv's bias is dropped (redundant with the
    batch-norm shift).
    """
    block = [nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=not bn)]
    if bn:
        block.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*block)
class BasicBlock(nn.Module):
    """ResNet-style residual block: two 3x3 convs with a (possibly
    projected) shortcut, ReLU after the addition."""
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless shape changes; then a 1x1 projection.
        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class Trans(nn.Module):
    """Image-to-image transformer network (encoder / middle / decoder).

    Forward takes a pair ``x = (x1, x2)``.  With ``cfg["singleIn"]`` only
    ``x2`` is encoded (optionally concatenated with flipped copies when
    ``cfg["sym"]`` is set); otherwise both inputs are encoded and merged.
    The output has ``cfg["imCh"]`` channels and is tanh-squashed to
    [-1, 1].

    Fix: uses ``torch.tanh`` instead of the deprecated
    ``torch.nn.functional.tanh`` (identical result).
    """
    def __init__(self, cfg):
        super(Trans, self).__init__()
        self.cfg = cfg
        conv_dim = int(cfg["convdim"]*cfg["netSi"])
        self.leak = cfg["tleak"]
        # encoding blocks
        self.in1 = cfg["singleIn"]
        self.sym = cfg["sym"]
        # Input channels grow when mirrored copies are concatenated (sym modes).
        insym = cfg["imCh"]*(1+2*int(cfg["sym"]==1) + int(cfg["sym"]==2) + 3*int(cfg["sym"]==3))
        if self.in1:
            self.conv1 = conv(insym, conv_dim, 4)
            self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        else:
            # Two-input mode: either concatenate inputs channel-wise (conca)
            # or run the encoder twice and concatenate feature maps.
            self.conca = cfg["conca"]
            co = self.conca
            self.conv1 = conv(insym*(co+1), conv_dim//(2-co), 4)
            self.conv2 = conv(conv_dim//(2-co), conv_dim*2//(2-co), 4)
        # residual blocks
        if cfg["resB"]:
            self.conv3 = BasicBlock(conv_dim*2, conv_dim*2)
            # NOTE(review): BasicBlock's third positional argument is *stride*;
            # passing 3 here looks like it was meant to be a kernel size --
            # confirm before enabling cfg["nExLay"] together with cfg["resB"].
            self.conv3a = BasicBlock(conv_dim * 2, conv_dim * 2, 3) if cfg["nExLay"] else nn.Identity()
            self.conv4 = BasicBlock(conv_dim * 2, conv_dim * 2)
        else:
            self.conv3 = conv(conv_dim*2, conv_dim*2, 3, 1, 1)
            self.conv3a = conv(conv_dim*2, conv_dim*2, 3, 1, 1) if cfg["nExLay"] else nn.Identity()
            self.conv4 = conv(conv_dim*2, conv_dim*2, 3, 1, 1)
        # decoding blocks
        self.deconv1 = deconv(conv_dim*2, conv_dim, 4)
        self.deconv2 = deconv(conv_dim, cfg["imCh"], 4, bn=False)
    def geto(self, inx):
        # Shared two-stage strided encoder (each conv halves the resolution).
        out = F.leaky_relu(self.conv1(inx), self.leak)
        out = F.leaky_relu(self.conv2(out), self.leak)
        return out
    def forward(self, x):
        x1, x2 = x
        if self.in1:
            if self.sym:
                # Augment the input with horizontally / vertically flipped copies.
                xsym = torch.flip(x2, dims=(-1,))
                if self.sym == 1: x2 = torch.cat([x2, xsym, torch.flip(x2, dims=(-2,))], dim=1)
                if self.sym == 2: x2 = torch.cat([x2, torch.flip(xsym, dims=(-2,))], dim=1)
                if self.sym == 3: x2 = torch.cat([x2, xsym, torch.flip(x2, dims=(-2,)), torch.flip(xsym, dims=(-2,))], dim=1)
            out = self.geto(x2)
        else:
            if self.sym > 0: print("must flip etc for each input - not implemented see above how to do it")
            if self.conca:
                x = torch.cat([x1, x2], dim=1)
                out = self.geto(x)
            else:
                out = torch.cat([self.geto(x1), self.geto(x2)], dim=1)
        out = F.leaky_relu(self.conv3(out), self.leak)
        out = F.leaky_relu(self.conv3a(out), self.leak) if self.cfg["nExLay"] else out
        out = F.leaky_relu(self.conv4(out), self.leak)
        out = F.leaky_relu(self.deconv1(out), self.leak)
        # FIX: F.tanh is deprecated; torch.tanh is the supported equivalent.
        out = torch.tanh(self.deconv2(out))
        return out
class D1(nn.Module):
    """Discriminator for mnist: three strided conv stages plus a conv
    "fc" head; outputs one score (or 11 logits when use_labels)."""
    def __init__(self, conv_dim=64, use_labels=False):
        super(D1, self).__init__()
        self.conv1 = conv(1, conv_dim, 4, bn=False)
        self.conv2 = conv(conv_dim, conv_dim*2, 4)
        self.conv3 = conv(conv_dim*2, conv_dim*4, 4)
        # 11 outputs = 10 classes + 1 "fake" class.
        n_out = 11 if use_labels else 1
        self.fc = conv(conv_dim*4, n_out, 4, 1, 0, False)
    def forward(self, x):
        h = F.leaky_relu(self.conv1(x), 0.05)
        h = F.leaky_relu(self.conv2(h), 0.05)
        h = F.leaky_relu(self.conv3(h), 0.05)
        return self.fc(h).squeeze()
class D2(nn.Module):
    """Discriminator for svhn: identical architecture to D1 (single-channel
    input, three strided conv stages, conv "fc" head)."""
    def __init__(self, conv_dim=64, use_labels=False):
        super(D2, self).__init__()
        self.conv1 = conv(1, conv_dim, 4, bn=False)
        self.conv2 = conv(conv_dim, conv_dim*2, 4)
        self.conv3 = conv(conv_dim*2, conv_dim*4, 4)
        # 11 outputs = 10 classes + 1 "fake" class.
        n_out = 11 if use_labels else 1
        self.fc = conv(conv_dim*4, n_out, 4, 1, 0, False)
    def forward(self, x):
        h = F.leaky_relu(self.conv1(x), 0.05)
        h = F.leaky_relu(self.conv2(h), 0.05)
        h = F.leaky_relu(self.conv3(h), 0.05)
        return self.fc(h).squeeze()
| 5,990
| 41.792857
| 198
|
py
|
DoTra
|
DoTra-main/classifierModels.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Classifier models
import numpy as np
import torch
import torch.nn as nn
class BBlock(nn.Module):
    """Conv -> BatchNorm -> ReLU (-> 2x2 MaxPool when ``down``) stage."""
    def __init__(self, in_planes, planes, ker=3, down=True, pad=1):
        super(BBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=ker, stride=1, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()
        # Identity when no downsampling is requested.
        self.mp = nn.MaxPool2d((2, 2), stride=2) if down else nn.Identity()
    def forward(self, x):
        return self.mp(self.relu(self.bn(self.conv1(x))))
class worNet(nn.Module):
    """VGG-like convolutional classifier built from BBlock stages.

    ``cfg["clcfg"]["netT"]`` containing "11" selects the deeper variant
    with additional non-downsampling blocks; channel widths are scaled
    by ``cfg["netSi"]``.
    """
    def __init__(self, cfg):
        super(worNet, self).__init__()
        cf = cfg["clcfg"]
        # tr() scales a channel width by cfg["netSi"], keeping at least 1.
        tr = lambda x: max(1, int(np.round(x * cfg["netSi"])))
        #self.addN=cfg["addN"] self.oneh = cfg["onehot"]
        self.in_channels = cfg["imCh"]
        in_channels = self.in_channels
        self.is11 = 1 if "11" in cf["netT"] else 0
        #chans = [in_channels, 32, 64, 64, 128, 128, 256, 256, 512, 512] if self.is11 else [in_channels, 32, 64, 128, 256, 512]
        chans = [in_channels, 64, 64, 64, 128, 128, 256, 256, 512, 512] if self.is11 else [in_channels, 64, 64, 128, 256, 512]
        i = -1
        def getConv(ker=cfg["ker"], down=True):
            # Builds the next BBlock stage, advancing the shared width index i.
            nonlocal i
            i += 1 #return nn.Sequential(*[nn.Conv2d(in_channels=inoffs[i]+ (tr(chans[i]) if i>0 else chans[i]) , out_channels=tr(chans[i+1]), kernel_size=(ker, ker), padding=ker > 1), nn.BatchNorm2d(tr(chans[i+1])), relu] + ([mp] if down else []))
            return BBlock((tr(chans[i]) if i > 0 else chans[i]), tr(chans[i+1]), ker=ker, down=down, pad=(ker-1)//2)#inoffs[i]+
        #if self.is11: self.conv0a = nn.Identity()
        self.conv0 = getConv()
        if self.is11: self.conv1a = getConv(down=False)
        self.conv1 = getConv()
        if self.is11: self.conv2a = getConv( down=False)
        self.conv2 = getConv()
        if self.is11: self.conv3a = getConv(ker=3, down=False)
        self.conv3 = getConv(ker=3)
        if self.is11: self.conv4a = getConv( down=False, ker=3)
        self.conv4 = getConv(ker=3)
        # Ordered list of stages actually applied in forward().
        self.allays = [self.conv0, self.conv1, self.conv2, self.conv3, self.conv4]
        if self.is11: self.allays = [self.conv0, self.conv1a, self.conv1, self.conv2a, self.conv2, self.conv3a, self.conv3, self.conv4a, self.conv4]
        i, ker = -1, 1
        self.avgpool = nn.AdaptiveMaxPool2d((1, 1))
        self.dropout = nn.Dropout(0.5) if cfg["drop"] else nn.Identity()
        # Optional two-layer head (512 -> 128 -> classes) when cfg["twoLin"].
        self.pred = nn.Linear(tr(512), tr(128)) if cfg["twoLin"] else nn.Identity()
        self.pred2 = nn.Linear(tr(128), cfg["ds"][1]) if cfg["twoLin"] else nn.Linear(tr(512), cfg["ds"][1])
        #self.k=0
    def forward(self, x):
        # import imgutil as imgu # print(np.sum(np.abs(x.cpu().numpy()))) # imgu.makeAndStore(x.cpu().numpy(),x.cpu().numpy(),"Img",str(self.k)+".png") # self.k+=1 # self.k=self.k%10
        for il, l in enumerate(self.allays): x = l(x)
        x = self.avgpool(x)
        x = torch.flatten(x, start_dim=1)
        x = self.dropout(x)
        x = self.pred(x)
        x = self.pred2(x)
        return x
def lin(c_in, c_out, bn=True, dr=False):
    """Build the layer list for one fully-connected stage.

    Order: optional Dropout(dr) -> Linear (bias only when no BatchNorm)
    -> optional BatchNorm1d -> ReLU. Returns a plain list of modules so the
    caller can splice stages into one nn.Sequential.
    """
    drop_part = [nn.Dropout(dr)] if dr > 0 else []
    core = [nn.Linear(c_in, c_out, bias=not bn)]
    norm_part = [nn.BatchNorm1d(c_out)] if bn else []
    return drop_part + core + norm_part + [nn.ReLU()]
class linNet(nn.Module):
    """MLP classifier over latent codes.

    Layer widths are cfg["aez"] scaled by cfg["llay"]; the last width is
    replaced by the number of classes cfg["ds"][1]. Each stage is built by
    the module-level `lin` helper; cfg["dben"] == 2 drops BatchNorm on the
    output stage.
    """
    def __init__(self, cfg):
        super(linNet, self).__init__()
        n_classes = cfg["ds"][1]
        widths = cfg["aez"] * np.array([1] + cfg["llay"] + [1])
        widths[-1] = n_classes
        stages = []
        for j in range(len(widths) - 1):
            is_last = (j + 1 == len(widths) - 1)
            use_bn = cfg["lben"] and not (cfg["dben"] == 2 and is_last)
            stages.extend(lin(widths[j], widths[j + 1], use_bn, cfg["ldrop"][j]))
        self.lays = nn.Sequential(*stages)
    def forward(self, x):
        """Return class logits for latent input x."""
        return self.lays(x)
| 4,359
| 43.948454
| 246
|
py
|
DoTra
|
DoTra-main/AEModels.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Autoencoder models and training
import numpy as np
import pickle,os,copy
import torch.optim as optim
import torch.cuda.amp as tca
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torch.nn.functional as F
from aecyc.OnlineRep import imgutil
class AEDisc(nn.Module):
    """GAN discriminator for autoencoder reconstructions.

    Five stride-2 conv blocks shrink a 32x32 input to 1x1, followed by a
    single linear layer producing one raw logit per sample (use with
    BCEWithLogitsLoss). BatchNorm in the conv stack is toggled by
    cfg["aeganbn"]; widths scale with cfg["netSi"].
    """
    def __init__(self, cfg, input_size=(1, 32, 32)):
        super(AEDisc, self).__init__()
        output_size = 1
        self.input_size = input_size
        self.channel_mult = int(64 * cfg["netSi"])
        # Optional BatchNorm factory; Identity when disabled.
        # (Removed an unused 1-D BatchNorm factory that was never referenced.)
        bn = lambda x: nn.BatchNorm2d(x) if cfg["aeganbn"] else nn.Identity()
        slope = 0.2
        self.conv = nn.Sequential(*[nn.Conv2d(in_channels=input_size[0], out_channels=self.channel_mult * 1, kernel_size=4, stride=2, padding=1), bn(self.channel_mult * 1), nn.LeakyReLU(slope, True),
                                    nn.Conv2d(self.channel_mult * 1, self.channel_mult * 2, 4, 2, 1), bn(self.channel_mult * 2), nn.LeakyReLU(slope, inplace=True),
                                    nn.Conv2d(self.channel_mult * 2, self.channel_mult * 4, 4, 2, 1), bn(self.channel_mult * 4), nn.LeakyReLU(slope, inplace=True),
                                    nn.Conv2d(self.channel_mult * 4, self.channel_mult * 8, 4, 2, 1), bn(self.channel_mult * 8), nn.LeakyReLU(slope, inplace=True),
                                    nn.Conv2d(self.channel_mult * 8, self.channel_mult * 8, 3, 2, 1), bn(self.channel_mult * 8), nn.LeakyReLU(slope, inplace=True)])
        # After five stride-2 convs a 32x32 input is 1x1, so the flattened
        # feature size is exactly channel_mult * 8.
        self.nin = self.channel_mult * 8
        self.linear = nn.Sequential(nn.Linear(self.nin, output_size))
    def forward(self, x):
        """Return a (batch, 1) tensor of real/fake logits."""
        for layer in self.conv:
            x = layer(x)
        x = torch.flatten(x, start_dim=1)
        return self.linear(x)
class CNN_Encoder(nn.Module):
    """Convolutional encoder mapping a 32x32 image to a cfg["aez"]-dim code.

    Five stride-2 conv blocks (optional BatchNorm via cfg["aebn"]) reduce the
    input to 1x1 spatially; a linear head projects to the latent size.
    Channel width scales with cfg["netSi"] (plus a bump for "F" datasets).
    """
    def __init__(self, cfg, input_size=(1, 32, 32)):
        super(CNN_Encoder, self).__init__()
        latent_dim = cfg["aez"]
        self.input_size = input_size
        self.channel_mult = int(64 * (cfg["netSi"] + 0.25 * int("F" in cfg["ds"][0])))
        cm = self.channel_mult
        make_bn = lambda c: nn.BatchNorm2d(c) if cfg["aebn"] else nn.Identity()
        make_bn1d = lambda c: nn.BatchNorm1d(c) if cfg["aebn"] else nn.Identity()
        leak = 0.05
        # (out_channels, kernel, stride, padding) per downsampling stage.
        specs = [(cm, 4, 2, 1), (cm * 2, 4, 2, 1), (cm * 4, 4, 2, 1),
                 (cm * 8, 4, 2, 1), (cm * 8, 3, 2, 1)]
        blocks = []
        prev_c = input_size[0]
        for out_c, k, s, p in specs:
            blocks += [nn.Conv2d(prev_c, out_c, k, s, p), make_bn(out_c),
                       nn.LeakyReLU(leak, inplace=True)]
            prev_c = out_c
        self.conv = nn.Sequential(*blocks)
        # Flattened feature size after reducing 32x32 -> 1x1.
        self.nin = cm * 8
        self.linear = nn.Sequential(nn.Linear(self.nin, latent_dim),
                                    make_bn1d(latent_dim), nn.LeakyReLU(leak),)
    def forward(self, x):
        """Return the latent code of shape (batch, cfg["aez"])."""
        for layer in self.conv:
            x = layer(x)
        x = torch.flatten(x, start_dim=1)
        return self.linear(x)
class CNN_Decoder(nn.Module):
    """Transposed-conv decoder mapping a cfg["aez"]-dim code to a 32x32 image.

    A linear layer expands the code to channel_mult*16 features, which are
    reshaped to (C, 1, 1) and upsampled through five stride-2 transposed
    convs to (cfg["imCh"], 32, 32). Output is squashed to [-1, 1] with tanh.
    """
    def __init__(self, cfg):
        super(CNN_Decoder, self).__init__()
        self.input_dim = cfg["aez"] #cfg["aecfg"]["esize"]
        self.channel_mult = int(64*(cfg["netSi"]+0.25*int("F" in cfg["ds"][0])))
        self.fc_output_dim = self.channel_mult*16
        self.fc = nn.Sequential(nn.Linear(self.input_dim, self.fc_output_dim),nn.BatchNorm1d(self.fc_output_dim),nn.ReLU(True))
        bn = lambda x: nn.BatchNorm2d(x) if cfg["aebn"] else nn.Identity()
        slope=0.05
        self.deconv = nn.Sequential(*[nn.ConvTranspose2d(self.fc_output_dim,self.channel_mult * 8, 4, 2,1, bias=False), bn(self.channel_mult * 8), nn.LeakyReLU(slope),
                                      nn.ConvTranspose2d(self.channel_mult * 8,self.channel_mult * 4, 4, 2,1, bias = False), bn(self.channel_mult * 4), nn.LeakyReLU(slope),
                                      nn.ConvTranspose2d(self.channel_mult * 4,self.channel_mult * 2, 4, 2, 1, bias = False), bn(self.channel_mult * 2), nn.LeakyReLU(slope),
                                      nn.ConvTranspose2d(self.channel_mult * 2,self.channel_mult * 1, 4, 2, 1, bias = False), bn(self.channel_mult * 1), nn.LeakyReLU(slope),
                                      nn.ConvTranspose2d(self.channel_mult * 1,cfg["imCh"], 4, 2, 1, bias = False)])
    def forward(self, x):
        """Decode latent codes x of shape (batch, aez) to (batch, imCh, 32, 32)."""
        x = self.fc(x)
        x = x.view(-1, self.fc_output_dim, 1, 1)
        for l in self.deconv: x=l(x)
        # torch.tanh replaces the deprecated F.tanh (removed in recent torch).
        return torch.tanh(x)
class AENetwork(nn.Module):
    """Convolutional autoencoder: CNN_Encoder + CNN_Decoder for 32x32 inputs."""
    def __init__(self, cfg):
        super(AENetwork, self).__init__()
        self.encoder = CNN_Encoder(cfg, input_size=(cfg["imCh"], 32, 32))
        self.decoder = CNN_Decoder(cfg)
    def enc(self, x):
        """Encode images to latent codes."""
        return self.encoder(x)
    def dec(self, z):
        """Decode latent codes back to images."""
        return self.decoder(z)
    def forward(self, x):
        """Return (reconstruction, latent_code) for input batch x."""
        code = self.enc(x)
        return self.dec(code), code
def getAEDat(netAE, dataset, encoded=True):
    """Run autoencoder *netAE* over *dataset* and collect outputs as numpy arrays.

    Args:
        netAE: trained AENetwork (returns (reconstruction, code)); moved to eval mode.
        dataset: iterable of (inputs, labels) batches; inputs are sent to CUDA.
        encoded: if True collect latent codes, else collect reconstructions.
    Returns:
        (X, Y): concatenated outputs and labels as numpy arrays.
    """
    # NOTE: an unused local normalisation helper (`nor`) was removed; it was
    # never called anywhere in this function.
    netAE.eval()
    aencx = []
    aency = []
    with tca.autocast():
        with torch.no_grad():
            for i, data in enumerate(dataset):
                x = data[0].cuda()
                selfx, code = netAE(x)
                tosave = code if encoded else selfx
                aencx.append(tosave.detach().cpu().numpy())
                aency.append(np.copy(data[1].cpu().numpy()))
    return np.concatenate(aencx, axis=0), np.concatenate(aency, axis=0)
def getAEDatIter(netAE, trdata, tedata, encoded=True, cds=None):
    """Encode train/test data with *netAE* and wrap the results via *cds*.

    *cds* is a dataset-factory callable (X, Y, shuffle, norm) -> iterator.
    Returns (train_iterator, test_iterator).
    """
    trX, trY = getAEDat(netAE, trdata, encoded=encoded)
    tr_iter = cds(trX, trY, True, False)
    teX, teY = getAEDat(netAE, tedata, encoded=encoded)
    te_iter = cds(teX, teY, False, False)
    return tr_iter, te_iter
def runAE(cfg, dataset, tedataset, sname, cget, picname):
    """Train (or load) the autoencoder and return encoded train/test data.

    Returns (train_codes, test_codes, training_stats, netAE), where each
    codes entry is the (X, Y) pair produced by getAEDat.
    """
    netAE, acfg = getAEModel(cfg, dataset, sname, cget, picname)
    trds = getAEDat(netAE, dataset)
    teds = getAEDat(netAE, tedataset)
    #imgutil.makeAndStore(trds[:64], trds[:64], cfg["bFolder"] + "samples/", "AE" + picname + fs(cfg) + ".png")
    return trds, teds, acfg, netAE
def decay(ccf, epoch, optimizerCl):
    """Step-decay learning-rate schedule, SGD only.

    Every (n//3 + n//10 + 2) epochs (n = ccf["opt"][1]) the learning rate of
    every parameter group is multiplied by 0.1. No-op for other optimizers.
    """
    opt_name, n_epochs = ccf["opt"][0], ccf["opt"][1]
    period = n_epochs // 3 + n_epochs // 10 + 2
    if opt_name == "S" and (epoch + 1) % period == 0:
        for group in optimizerCl.param_groups:
            group['lr'] *= 0.1
        print(" D", np.round(optimizerCl.param_groups[0]['lr'], 5))
def getAEModel(cfg, train_dataset, sname, cget,picname=""): #Co,val_datasetMa,resFolder
    """Train the autoencoder (optionally with a GAN discriminator) on CUDA.

    Trains for cfg["epA"] epochs with mixed precision (GradScaler/autocast).
    If cfg["aeGAN"][0] is truthy, a discriminator (AEDisc) and adversarial
    losses are trained alongside the MSE reconstruction objective.
    Returns (netAE, stats_dict), or (None, None) if the loss diverged to NaN.
    """
    ccf=cfg["aecfg"]
    netAE = AENetwork(cfg).cuda()
    if cfg["aeGAN"][0]:
        # Adversarial branch: discriminator + separate Adam optimizers.
        netD=AEDisc(cfg).cuda()
        optimizerD = optim.Adam(netD.parameters(), lr=cfg["aeGAN"][1]["lr"], betas=(0.5, 0.999))
        optimizerG = optim.Adam(netAE.parameters(), lr=cfg["aeGAN"][1]["lr"], betas=(0.5, 0.999))
        criterion = nn.BCEWithLogitsLoss()
        real_label, fake_label = 1., 0.
        gloss,drloss,dfloss=0,0,0
    if ccf["opt"][0] == "S": optimizerCl = optim.SGD(netAE.parameters(), lr=ccf["opt"][1], momentum=0.8, weight_decay=ccf["opt"][2])
    elif ccf["opt"][0] == "A": optimizerCl = optim.Adam(netAE.parameters(), ccf["opt"][1], weight_decay=ccf["opt"][2])
    # NOTE(review): the bare string below is a no-op; an unknown optimizer key
    # falls through silently and optimizerCl stays undefined (NameError later).
    else: "Error opt not found"
    closs, trep, loss = 0, cfg["epA"], nn.MSELoss()#nn.CrossEntropyLoss()
    print("Train AE")
    scaler = tca.GradScaler()
    # ulo: exponential moving average of a loss; heavier smoothing after epoch 1.
    ulo = lambda x,t,e: 0.97*x+0.03*t.item() if e>1 else 0.85*x+0.15*t.item()
    torch.backends.cudnn.benchmark = True
    for epoch in range(trep):
        netAE.train()
        for i, data in enumerate(train_dataset):
            with tca.autocast():
                # Reconstruction step: MSE between flattened output and input.
                optimizerCl.zero_grad()
                x=data[0].cuda()
                outAE,lo=netAE(x)
                errD_real = loss(torch.flatten(outAE,1),torch.flatten(x,1))
                scaler.scale(errD_real).backward()
                scaler.step(optimizerCl)
                scaler.update()
                if cfg["aeGAN"][0]:
                    ## Train with all-real batch
                    netD.zero_grad()
                    b_size = x.size(0)
                    label = torch.full((b_size,), real_label, dtype=torch.float).cuda()
                    outreal = netD(x).view(-1)
                    errD_real = criterion(outreal, label)
                    scaler.scale(errD_real).backward()
                    ## Train with all-fake batch
                    label.fill_(fake_label)
                    outfake = netD(outAE.detach()).view(-1)
                    errD_fake = criterion(outfake, label)
                    scaler.scale(errD_fake).backward()
                    scaler.step(optimizerD)
                    # (2) Update G network: maximize log(D(G(z)))
                    optimizerG.zero_grad()
                    label.fill_(real_label)  # fake labels are real for generator cost
                    outAE, _ = netAE(x)
                    outfake = netD(outAE).view(-1)
                    errG = criterion(outfake, label)
                    scaler.scale(errG).backward()
                    scaler.step(optimizerG)
                    scaler.update()
                    # Track smoothed generator / discriminator losses.
                    gloss = ulo(gloss, errG, epoch)
                    drloss = ulo(drloss, errD_real, epoch)
                    dfloss = ulo(dfloss, errD_fake, epoch)
            # NOTE(review): with GAN enabled, errD_real here is the
            # discriminator real-loss, not the reconstruction loss.
            closs = ulo(closs,errD_real,epoch)
        decay(ccf,epoch,optimizerCl)
        netAE.eval()
        if (epoch % 2 == 0 and epoch<=10) or (epoch % 10==0 and epoch>10):
            print(epoch, "AE", np.round(np.array([closs]+([gloss,drloss,dfloss] if cfg["aeGAN"][0] else [])), 5), cfg["pr"])
        if np.isnan(closs):
            print("Failed!!!")
            return None,None
    lcfg = {"AELo": closs}
    if cfg["aeGAN"][0]: lcfg={**lcfg,**{"glo":gloss,"drlo":drloss,"dflo":dfloss}}
    netAE.eval()
    return netAE, lcfg
| 10,633
| 49.398104
| 203
|
py
|
DoTra
|
DoTra-main/trainClassifiers.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Training of classifiers (and also DoTra on paired samples)
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.cuda.amp as tca
from classifierModels import worNet,linNet
from torch.utils.data import Dataset,TensorDataset
from doTraModel import Trans
import imgutil
niter = 1e10
def decay(ccf, epoch, optimizerCl, warmup, trep):
    """Warmup boost plus step-decay learning-rate schedule.

    At epoch == warmup[0], multiply the LR by warmup[1] (or warmup[1]/3 for
    non-SGD). For SGD, additionally decay by 10x every
    int(trep//3 + 10 + warmup[0]) epochs.
    """
    is_sgd = ccf["opt"][0] == "S"
    if epoch == warmup[0]:
        boost = warmup[1] if is_sgd else warmup[1] / 3
        for group in optimizerCl.param_groups:
            group['lr'] *= boost
        print(" W", np.round(optimizerCl.param_groups[0]['lr'], 5))
    period = int(trep // 3 + 10 + warmup[0])
    if is_sgd and (epoch + 1) % period == 0:
        for group in optimizerCl.param_groups:
            group['lr'] *= 0.1
        print(" D", np.round(optimizerCl.param_groups[0]['lr'], 5))
def getSingleAcc(net, dsx, labels, pool=None):
    """Count correct predictions of *net* on one batch.

    Runs under autocast; prediction is the argmax over class logits.
    *pool* is accepted for interface compatibility but unused.
    Returns the number of correct samples as a Python int.
    """
    with tca.autocast():
        logits = net(dsx)
        _, pred_cls = torch.max(logits.data, 1)
        n_correct = torch.eq(pred_cls, labels).sum().item()
    return n_correct
def getAcc(net, dataset, niter=10000, cfg=None):
    """Classification accuracy of *net* over *dataset* (evaluated on CUDA).

    Iterates at most *niter* batches under no_grad/autocast; labels are taken
    from batch[1]. Returns accuracy as a Python float in [0, 1].
    """
    n_correct, n_total = 0, 0
    net.eval()
    with torch.no_grad():
        for batch_idx, batch in enumerate(dataset):
            with tca.autocast():
                bx, by = batch[0].cuda(), batch[1].cuda().unsqueeze(-1)
                logits = net(bx)
                n_total += by.size(0)
                _, pred = torch.max(logits, 1)
                n_correct += torch.eq(pred, by.squeeze().long()).sum()
                if batch_idx >= niter:
                    break
    return float((n_correct * 1.0 / n_total).cpu().numpy())
def getCorr(net, dataset):
    """Per-sample correctness flags and top-logit confidences over *dataset*.

    Returns (correct, conf): two 1-D numpy arrays aligned with the dataset
    order — boolean correctness of the argmax prediction, and the maximum
    logit value per sample.
    """
    flags, confs = [], []
    net.eval()
    with torch.no_grad():
        for batch in dataset:
            with tca.autocast():
                bx, by = batch[0].cuda(), batch[1].cuda().unsqueeze(-1)
                logits = net(bx)
                top_val, top_idx = torch.max(logits, 1)
                flags.append(torch.eq(top_idx, by.squeeze().long()).detach().cpu().numpy())
                confs.append(top_val.detach().cpu().numpy())
    return np.concatenate(flags, axis=0), np.concatenate(confs, axis=0)
def getCls(net, dataset):
    """Collect inputs and *net*'s predicted class labels for *dataset*.

    Returns (X, y_pred): inputs (as stored in the dataset, CPU numpy) and the
    argmax predictions of the network, concatenated over all batches.
    """
    net.eval()
    bx, by = [], []
    with torch.no_grad():
        for cit, data in enumerate(dataset):
            with tca.autocast():
                # Only the inputs are needed here; the original code also
                # copied the labels to the GPU without ever using them.
                dsx = data[0].cuda()
                outputs = net(dsx)
                _, predicted = torch.max(outputs, 1)
                by.append(predicted.detach().cpu().numpy())
                bx.append(data[0].cpu().numpy())
    return np.concatenate(bx, axis=0), np.concatenate(by, axis=0)
def setEval(netCl):
    """Put *netCl* into eval mode and hard-disable all dropout.

    Beyond .eval(), this zeroes the dropout probability itself on Dropout
    modules and on the recurrent dropout of LSTM/GRU layers, so later
    .train() calls won't silently re-enable it at the old rate.
    """
    netCl.eval()
    for _, mod in netCl.named_modules():
        if isinstance(mod, nn.Dropout):
            mod.p = 0
        elif isinstance(mod, (nn.LSTM, nn.GRU)):
            mod.dropout = 0
def getTrans(cfg, train_dataset, val_dataset, dat, traname, cget, selfTra=False):
    """Train the domain-transformer (Trans) model on paired domain samples.

    Each batch supplies samples from consecutive domains; random index triples
    pick (input domain(s), target domain) and the model learns to map the
    first two to the third under an L1 or MSE loss (cfg["traLo"]).
    Returns (netCl, stats_dict, False), or (None, None, None) on NaN loss.
    """
    ccf = cfg["trcfg"]
    NETWORK = Trans  # if "V" in ccf["netT"] else (res.ResNet10 if ccf["netT"] == "R10" else res.ResNet18)
    netCl = NETWORK(cfg).cuda()
    closs, teaccs, trep, clr, telo = 0, [], cfg["epC"], ccf["opt"][1], 0
    loss = nn.MSELoss() if cfg["traLo"] else nn.L1Loss()
    warmup = (max(2, trep // 40), 10)
    optimizerCl = optim.Adam(netCl.parameters(), ccf["opt"][1], weight_decay=ccf["opt"][2])
    print("Train Trans", ccf)
    scaler = tca.GradScaler()
    nDom = 2  # len(cfg["trans"])+1-cfg["nFor"]  # Last for testing
    # FIX: np.int was removed in NumPy 1.24; plain `int` is the exact
    # equivalent dtype (platform default integer).
    inds = np.zeros(3, dtype=int)
    torch.backends.cudnn.benchmark = True
    for epoch in range(trep):
        netCl.train()
        for i, data in enumerate(train_dataset):
            with tca.autocast():
                optimizerCl.zero_grad()
                xdata = data[:-1]
                if cfg["singleIn"]:
                    # Single-input mode: feed domain k twice, predict k+1.
                    inds[0] = np.random.choice(nDom - 1)  # -4 = X0, -3=X1, -2=XTe, -1=XPreTest
                    inds[1] = inds[0]
                    inds[2] = inds[1] + 1
                else:
                    # Pair mode: feed domains (k, k+1[, +random]) and predict the next.
                    inds[0] = np.random.choice(nDom - 2)  # -4 = X0, -3=X1, -2=XTe, -1=XPreTest
                    inds[1] = inds[0] + 1
                    if cfg["ranCh"]: inds[1] += np.random.choice(nDom - 2 - inds[0])
                    inds[2] = inds[1] + 1
                dsx = [xdata[cind].cuda() for cind in inds]  # dsy = data[1].cuda()
                output = netCl(dsx[:2])
                errD_real = loss(output, dsx[-1])
            scaler.scale(errD_real).backward()
            scaler.step(optimizerCl)
            scaler.update()
            # Exponential moving average of the training loss.
            closs = 0.97 * closs + 0.03 * errD_real.item() if i > 20 else 0.8 * closs + 0.2 * errD_real.item()
        decay(ccf, epoch, optimizerCl, warmup, trep)
        netCl.eval()
        if (epoch % 2 == 0 and epoch <= 10) or (epoch % 10 == 0 and epoch > 10):
            print(epoch, np.round(np.array([closs]), 5), cfg["pr"])
        if np.isnan(closs):
            print("Failed!!!")
            return None, None, None
    def getLo(ds, off=0):
        # Average transformer loss over *ds*, shifted by *off* domains.
        telo, nele = 0, 0
        for i, xdata in enumerate(ds):
            with tca.autocast():
                with torch.no_grad():
                    ainds = [nDom - 3 + off, nDom - 2 + off, nDom - 1 + off]  # Shift by one to get test
                    dsx = [xdata[cind].cuda() for cind in ainds]
                    output = netCl(dsx[:2])
                    telo += loss(output, dsx[-1])
                    nele += dsx[0].shape[0]
        return (telo / nele).item()
    def transform(cfg, orgTransModel, traX, traY):
        # NOTE(review): defined but not used inside getTrans; kept for parity
        # with the original code. Applies the transformer twice to (traX, traY).
        def cds(X, Y, shuffle=False, norm=True):
            noX = imgutil.nor(X.astype(np.float32)) if norm else X
            ds = TensorDataset(torch.from_numpy(noX), torch.from_numpy(Y))
            return torch.utils.data.DataLoader(ds, batch_size=cfg["batchSize"], shuffle=shuffle, num_workers=0)
        cajx, cajy = [], []
        orgTransModel.eval()
        for data in cds(traX, traY, shuffle=False):
            with tca.autocast():
                dsx = data[0].cuda()
                out1 = orgTransModel([None, dsx])
                output = orgTransModel([None, out1]).detach().cpu()
                cajx.append(output.clone().numpy())
                cajy.append(data[-1].clone().numpy())
        return cds(np.concatenate(cajx, axis=0), np.concatenate(cajy, axis=0))
    lcfg = {"trLo": closs, "tetrLo": getLo(train_dataset), "teteLo": getLo(val_dataset)}
    setEval(netCl)
    return netCl, lcfg, False
def getclassifier(cfg, train_dataset, val_dataset, sname, getc, save=False, loadCl=True, useLat=False):
    """Train a classifier (worNet on images, linNet on latents if *useLat*).

    SGD with LR warmup and step decay, mixed precision, cross-entropy loss.
    Returns (netCl, stats_dict) with final/max test accuracy, or
    (None, None, None) when the smoothed loss turns NaN.
    """
    print(sname, "Cl")
    ccf = cfg["clcfg"]
    if useLat: NETWORK = linNet
    else: NETWORK = worNet  # if "V" in ccf["netT"] else (res.ResNet10 if ccf["netT"] == "R10" else res.ResNet18)
    netCl = NETWORK(cfg).cuda()
    closs, teaccs, trep, loss, clr = 0, [], cfg["epC"], nn.CrossEntropyLoss(), ccf["opt"][1]
    warmup = (max(2, trep // 40), 10)
    if ccf["opt"][0] == "S": optimizerCl = optim.SGD(netCl.parameters(), lr=ccf["opt"][1]/warmup[1], momentum=0.9, weight_decay=ccf["opt"][2])
    # FIX: the original `else: "Error opt not found"` was a no-op string; an
    # unknown optimizer key silently fell through and crashed later with a
    # NameError. Fail fast with a clear error instead.
    else: raise ValueError("Error opt not found")
    print("Train CL", sname, ccf)
    scaler = tca.GradScaler()
    teAccs = []
    clAcc = lambda dataset: getAcc(netCl, dataset, niter=niter, cfg=cfg)
    crolo = nn.CrossEntropyLoss()
    torch.backends.cudnn.benchmark = True
    for epoch in range(trep):
        netCl.train()
        for i, data in enumerate(train_dataset):
            with tca.autocast():
                optimizerCl.zero_grad()
                dsx, dsy = data[0].cuda(), data[1].cuda()
                output = netCl(dsx)
                errD_real = crolo(output, dsy.long())
                scaler.scale(errD_real).backward()
                scaler.step(optimizerCl)
                scaler.update()
                # Exponential moving average of the training loss.
                closs = 0.97 * closs + 0.03 * errD_real.item() if i > 20 else 0.8 * closs + 0.2 * errD_real.item()
        decay(ccf, epoch, optimizerCl, warmup, trep)
        netCl.eval()
        teAccs.append(clAcc(val_dataset))
        if (epoch % 2 == 0 and epoch <= 10) or (epoch % 10 == 0 and epoch > 10):
            print(epoch, np.round(np.array([closs, teAccs[-1], clAcc(train_dataset)]), 5), cfg["pr"])
        if np.isnan(closs):
            print("Failed!!!")
            return None, None, None
    mteA = np.max(np.array(teAccs))
    lcfg = {"teA": teAccs[-1], "trA": clAcc(train_dataset), "Lo": closs, "mteA": mteA}
    setEval(netCl)
    return netCl, lcfg
def getLinCl(cfg, train_dataset, val_dataset, sname, getc, save=True, loadCl=True):
    """Train the linear classifier (LinCl) on latent codes.

    Same loop as getclassifier but with a quartered SGD learning rate and a
    fifth of the weight decay. Returns (netCl, stats_dict) or
    (None, None, None) on NaN loss.
    """
    ccf = cfg["clcfg"]
    from aecyc.latAEModels import LinCl
    netCl = LinCl(cfg).cuda()
    closs, teaccs, trep, loss, clr = 0, [], cfg["epC"], nn.CrossEntropyLoss(), ccf["opt"][1]/4  # Train just 1/2 as long
    warmup = (max(2, trep // 40), 10)
    if ccf["opt"][0] == "S": optimizerCl = optim.SGD(netCl.parameters(), lr=clr/warmup[1], momentum=0.8, weight_decay=ccf["opt"][2]/5)
    # FIX: the original `else: "Error opt not found"` was a no-op string;
    # raise instead of silently leaving optimizerCl undefined.
    else: raise ValueError("Error opt not found")
    print("Train CL", sname, ccf)
    scaler = tca.GradScaler()
    teAccs = []
    clAcc = lambda dataset: getAcc(netCl, dataset, niter=niter, cfg=cfg)
    crolo = nn.CrossEntropyLoss()
    for epoch in range(trep):
        netCl.train()
        for i, data in enumerate(train_dataset):
            with tca.autocast():
                optimizerCl.zero_grad()
                dsx, dsy = data[0].cuda(), data[1].cuda()
                output = netCl(dsx)
                errD_real = crolo(output, dsy.long())
                scaler.scale(errD_real).backward()
                scaler.step(optimizerCl)
                scaler.update()
                # Exponential moving average of the training loss.
                closs = 0.97 * closs + 0.03 * errD_real.item() if i > 20 else 0.8 * closs + 0.2 * errD_real.item()
        decay(ccf, epoch, optimizerCl, warmup, trep)
        netCl.eval()
        teAccs.append(clAcc(val_dataset))
        if (epoch % 2 == 0 and epoch <= 10) or (epoch % 10 == 0 and epoch > 10):
            print(epoch, np.round(np.array([closs, teAccs[-1], clAcc(train_dataset)]), 5), cfg["pr"])
        if np.isnan(closs):
            print("Failed!!!")
            return None, None, None
    lcfg = {"LiteA": clAcc(val_dataset), "LitrA": clAcc(train_dataset), "LiLo": closs}
    setEval(netCl)
    return netCl, lcfg
| 11,382
| 45.461224
| 258
|
py
|
DoTra
|
DoTra-main/dutils.py
|
#Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
from scipy import ndimage
from torch.utils.data import Dataset
import numpy as np,os,sklearn #,pickle imageio,time,
import torchvision,torch
import torchvision.transforms as transforms
def getnorm(dname):
    """Return per-channel (mean, std) normalisation tensors on CUDA.

    Shapes are (1, 1, 1, 1) so they broadcast over NCHW batches. Only
    "MNIST" is handled; any other name implicitly returns None — callers
    must pass a supported dataset.
    """
    if dname == "MNIST":
        mean = torch.from_numpy(np.array((0.1307), np.float32).reshape(1, 1, 1, 1)).cuda()
        std = torch.from_numpy(np.array((0.3081), np.float32).reshape(1, 1, 1, 1)).cuda()
        return mean, std
def getFullDS(cfg,ntrain,sname,cget):
    """Download/cache a dataset to disk and return normalised train/test splits.

    On first run (or when *cget* is truthy) the torchvision dataset is
    downloaded, optionally resized to cfg["imSi"], shuffled, and stored as
    float16/int16 numpy files; later runs just load those files.
    Returns ((trX, trY), (teX, teY)) with inputs standardised by getnorm().
    NOTE(review): only "MNIST" is handled; any other dname leaves `cdat`,
    `down` and `fname` undefined and raises a NameError below.
    """
    dname=cfg["ds"][0]
    trans=transforms.Compose([transforms.ToTensor()])
    if dname == "MNIST":
        cdat = torchvision.datasets.MNIST
        cfg["imCh"] = 1
        down=True
        cpa="."
        fname="Mnist"
    # Only (re)build the cached numpy files if they are missing or forced.
    if not os.path.exists(fname+"teX") or cget:
        os.makedirs(cpa,exist_ok=True)
        def loadStore(isTrain,ndat):
            # Pull the whole split in ONE DataLoader batch of size ndat.
            trainset = cdat(root=".", train=isTrain, download=down,transform=trans)
            train_dataset = torch.utils.data.DataLoader(trainset, batch_size=ndat, num_workers=0) # cfg["batchSize"]
            ds = next(iter(train_dataset))
            X,y=ds[0].clone().numpy(),ds[1].clone().numpy()
            print("Data stats",dname,X.shape,np.mean(X,axis=(0,2,3)),np.std(X,axis=(0,2,3)))
            # Resize 28x28 images to cfg["imSi"] via spline interpolation.
            if (dname == "MNIST" or dname == "Fash") and cfg["imSi"]!=28:
                X=[ndimage.zoom(X[i,0],cfg["imSi"]/28) for i in range(X.shape[0])]
                X=np.stack(X,axis=0)
                X=np.expand_dims(X,axis=1)
            ds = [X, y]
            ds = sklearn.utils.shuffle(*ds) # , random_state=cfg["seed"])
            # Stored as float16 to halve disk usage; labels as int16.
            t=np.float16
            preamb="tr" if isTrain else "te"
            with open(fname + preamb+"X", "wb") as f: np.save(f, ds[0].astype(t), allow_pickle=True)
            with open(fname + preamb+"Y", "wb") as f: np.save(f, ds[1].astype(np.int16), allow_pickle=True)
            #return trainset
        loadStore(True,ntrain)
        loadStore(False, ntrain)
    lo = lambda na: np.load(open(fname + na, "rb"), allow_pickle=True)
    trX,trY=lo("trX"),lo("trY")
    teX,teY=lo("teX"),lo("teY")
    # Standardise with the dataset's fixed mean/std (computed on CUDA, moved back).
    norm=getnorm(dname)
    trX = (trX - norm[0].cpu().numpy()) / norm[1].cpu().numpy()
    teX = (teX - norm[0].cpu().numpy()) / norm[1].cpu().numpy()
    return (trX, trY), (teX, teY)#, None,norm
| 2,472
| 44.796296
| 198
|
py
|
Disco
|
Disco-master/Python/plotDiscoR.py
|
import sys
import h5py as h5
import numpy as np
import matplotlib.pyplot as plt
def loadCheckpoint(filename):
    """Load a Disco HDF5 checkpoint.

    Returns (t, r, prim): simulation time, per-cell radius array, and the
    primitive-variable array (cells x nq). The radius of every cell in
    annulus (i, k) is the midpoint of its bounding r-faces.
    """
    # FIX: use a context manager so the HDF5 handle is always closed
    # (the original left the file open), and `range` instead of the
    # Python-2-only `xrange` — this file otherwise uses Python-3 syntax.
    with h5.File(filename, "r") as f:
        piph = f['Data']['Cells'][:, -1][...]
        prim = f['Data']['Cells'][:, :-1][...]
        index = f['Grid']['Index'][...]
        idPhi0 = f['Grid']['Id_phi0'][...]
        nphi = f['Grid']['Np'][...]
        t = f['Grid']['T'][0]
        riph = f['Grid']['r_jph'][...]
        ziph = f['Grid']['z_kph'][...]
    r = np.zeros(piph.shape)
    R = 0.5 * (riph[1:] + riph[:-1])  # annulus mid-radii
    for i in range(index.shape[0]):
        for k in range(index.shape[1]):
            ind0 = index[i, k]
            ind1 = ind0 + nphi[i, k]
            r[ind0:ind1] = R[i]
    return t, r, prim
def plotCheckpoint(file):
    """Plot the radial profiles of one Disco checkpoint and save a PNG."""
    print("Loading {0:s}...".format(file))
    t, r, prim = loadCheckpoint(file)
    print("   Plotting...")
    nq = prim.shape[1]
    fig, ax = plt.subplots(2, 3, figsize=(14, 9))
    # (axes position, column in prim, y-label) for each primitive variable.
    panels = [((0, 0), 0, r"$\rho$"),
              ((0, 1), 1, r"$P$"),
              ((1, 0), 2, r"$u_r$"),
              ((1, 1), 3, r"$u_\phi$"),
              ((1, 2), 4, r"$u_z$")]
    for (row, col), q, label in panels:
        plotAx(ax[row, col], r, prim[:, q], "linear", "linear", r"$r$", label, 'k+')
    if nq > 5:
        # Passive scalar, present only in some runs.
        plotAx(ax[0, 2], r, prim[:, 5], "linear", "linear", r"$r$", r"$q$", 'k+')
    title = "DISCO t = {0:.3g}".format(t)
    #fig.suptitle(title, fontsize=18)
    plt.tight_layout()
    name = file.split('.')[0].split('_')[-1]
    plotname = "plot_r_{0:s}.png".format(name)
    print("   Saving {0:s}...".format(plotname))
    fig.savefig(plotname)
    plt.close(fig)
def plotAx(ax, x, y, xscale, yscale, xlabel, ylabel, *args, **kwargs):
    """Plot y vs x on *ax*, then apply axis labels and scales.

    Extra *args/**kwargs are forwarded to ax.plot (format string, colors...).
    """
    ax.plot(x, y, *args, **kwargs)
    for setter, value in ((ax.set_xlabel, xlabel),
                          (ax.set_ylabel, ylabel),
                          (ax.set_xscale, xscale),
                          (ax.set_yscale, yscale)):
        setter(value)
if __name__ == "__main__":
    # Command-line entry point: plot every checkpoint given on the command line.
    if len(sys.argv) < 2:
        print("Makes plots of Disco prims as a function of r.")
        print("usage: python plotDiscoR.py <checkpoint.h5 ...>")
        sys.exit()
    for checkpoint in sys.argv[1:]:
        plotCheckpoint(checkpoint)
| 2,180
| 26.607595
| 79
|
py
|
OpenFWI
|
OpenFWI-main/pytorch_ssim.py
|
# From https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
    """Normalised 1-D Gaussian kernel of length *window_size* (sums to 1)."""
    center = window_size // 2
    weights = [exp(-((i - center) ** 2) / float(2 * sigma ** 2))
               for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, ws, ws) Gaussian window for grouped conv2d.

    The 2-D kernel is the outer product of a 1-D Gaussian (sigma=1.5) with
    itself, expanded across channels for use with groups=channel.
    """
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    expanded = kernel_2d.expand(channel, 1, window_size, window_size).contiguous()
    return Variable(expanded)
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """SSIM as an nn.Module, caching the Gaussian window between calls.

    The window is rebuilt (and moved to the input's device/dtype) whenever
    the channel count or tensor type of the input changes.
    """
    def __init__(self, window_size = 11, size_average = True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)
    def forward(self, img1, img2):
        """Return SSIM between NCHW batches img1 and img2."""
        n_chan = img1.size(1)
        cache_ok = (n_chan == self.channel
                    and self.window.data.type() == img1.data.type())
        if cache_ok:
            win = self.window
        else:
            win = create_window(self.window_size, n_chan)
            if img1.is_cuda:
                win = win.cuda(img1.get_device())
            win = win.type_as(img1)
            # Remember the window for subsequent calls with the same layout.
            self.window = win
            self.channel = n_chan
        return _ssim(img1, img2, win, self.window_size, n_chan, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
    """Functional SSIM between two NCHW batches (window built per call)."""
    n_chan = img1.size(1)
    win = create_window(window_size, n_chan)
    if img1.is_cuda:
        win = win.cuda(img1.get_device())
    win = win.type_as(img1)
    return _ssim(img1, img2, win, window_size, n_chan, size_average)
| 2,722
| 35.306667
| 104
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.