repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/datasets/multi_mnist.py
|
import codecs
import gzip
import random
import urllib
import urllib.request
from pathlib import Path

import numpy as np
import torch
from PIL import Image
from scipy import ndimage
class MultiMNIST(torch.utils.data.Dataset):
    """MultiMNIST: each sample overlays two shifted MNIST digits.

    For every "left" digit a random "right" digit from the same split is
    chosen; both 28x28 digits are pasted with small random offsets onto a
    36x36 canvas which is then resized back to 28x28.  ``__getitem__``
    returns ``(image, labels)`` where ``labels`` stacks the left and right
    digit classes.
    """

    urls = [
        'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
        'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
        'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
        'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
    ]
    raw_folder = 'raw'
    processed_folder = 'processed'
    training_file = 'training.pth'
    test_file = 'test.pth'

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        """Load (and optionally download/build) the requested split.

        Args:
            root: dataset root; processed tensors live in ``root/processed``.
            train: select the training split when True, else the test split.
            transform: optional callable applied eagerly to every PIL image
                at construction time (not lazily in ``__getitem__``).
            target_transform: kept for API compatibility; never applied.
            download: fetch and build the dataset if it is missing.

        Raises:
            RuntimeError: when the processed files are absent and
                ``download`` is False.
        """
        self.root = Path(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        data_file = self.training_file if train else self.test_file
        self.data, self.labels_l, self.labels_r = torch.load(
            self.root / self.processed_folder / data_file)
        if transform is not None:
            # Transform eagerly so __getitem__ is a cheap index lookup.
            self.data = [self.transform(Image.fromarray(
                img.numpy().astype(np.uint8), mode='L')) for img in self.data]

    def __getitem__(self, index):
        """Return ``(image, stacked_labels)`` for sample *index*."""
        img, target_l, target_r = self.data[index], self.labels_l[index], self.labels_r[index]
        return img, torch.stack([target_l, target_r])

    def __len__(self):
        return len(self.data)

    def _check_exists(self):
        """True when both processed split files are present."""
        return (self.root / self.processed_folder / self.training_file).is_file() and \
            (self.root / self.processed_folder / self.test_file).is_file()

    def download(self):
        """Download raw MNIST, decompress it, and build MultiMNIST tensors.

        A no-op when the processed files already exist.
        """
        if self._check_exists():
            return
        # download files
        (self.root / self.raw_folder).mkdir(parents=True, exist_ok=True)
        (self.root / self.processed_folder).mkdir(parents=True, exist_ok=True)
        for url in self.urls:
            print('Downloading ' + url)
            data = urllib.request.urlopen(url)
            filename = url.rpartition('/')[2]
            file_path = self.root / self.raw_folder / filename
            with open(file_path, 'wb') as f:
                f.write(data.read())
            # Decompress "<name>.gz" to "<name>" and drop the archive.
            with open(self.root / self.raw_folder / '.'.join(filename.split('.')[:-1]), 'wb') as out_f, \
                    gzip.GzipFile(file_path) as zip_f:
                out_f.write(zip_f.read())
            file_path.unlink()
        # process and save as torch files
        print('Processing...')
        multi_mnist_ims, extension = self.read_image_file(
            self.root / self.raw_folder / 'train-images-idx3-ubyte', shift_pix=4, rand_shift=True)
        multi_mnist_labels_l, multi_mnist_labels_r = self.read_label_file(
            self.root / self.raw_folder / 'train-labels-idx1-ubyte', extension)
        tmulti_mnist_ims, textension = self.read_image_file(
            self.root / self.raw_folder / 't10k-images-idx3-ubyte', shift_pix=4, rand_shift=True)
        tmulti_mnist_labels_l, tmulti_mnist_labels_r = self.read_label_file(
            self.root / self.raw_folder / 't10k-labels-idx1-ubyte', textension)
        multi_mnist_training_set = (multi_mnist_ims, multi_mnist_labels_l, multi_mnist_labels_r)
        multi_mnist_test_set = (tmulti_mnist_ims, tmulti_mnist_labels_l, tmulti_mnist_labels_r)
        with open(self.root / self.processed_folder / self.training_file, 'wb') as f:
            torch.save(multi_mnist_training_set, f)
        with open(self.root / self.processed_folder / self.test_file, 'wb') as f:
            torch.save(multi_mnist_test_set, f)
        print('Done!')

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        tmp = 'train' if self.train is True else 'test'
        fmt_str += '    Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(
            tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(
            tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str

    @staticmethod
    def get_int(b):
        """Decode big-endian bytes into an int (IDX header fields)."""
        return int(codecs.encode(b, 'hex'), 16)

    @staticmethod
    def read_label_file(path, extension):
        """Read an IDX1 label file and pair each label with a permuted one.

        ``extension[i]`` is the index of the digit overlaid on image ``i``
        (as produced by :meth:`read_image_file`), so ``labels_r[i]`` is the
        label of the right digit.

        Returns:
            (labels_l, labels_r): two int64 tensors of length N.
        """
        with open(path, 'rb') as f:
            data = f.read()
        assert MultiMNIST.get_int(data[:4]) == 2049  # IDX1 magic number
        length = MultiMNIST.get_int(data[4:8])
        parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
        # np.long was removed in NumPy 1.24; use the explicit int64 dtype.
        multi_labels_l = np.zeros(length, dtype=np.int64)
        multi_labels_r = np.zeros(length, dtype=np.int64)
        for im_id in range(length):
            multi_labels_l[im_id] = parsed[im_id]
            multi_labels_r[im_id] = parsed[extension[im_id]]
        return (torch.from_numpy(multi_labels_l).view(-1).long(),
                torch.from_numpy(multi_labels_r).view(-1).long())

    @staticmethod
    def read_image_file(path, shift_pix=4, rand_shift=True, rot_range=(0, 0), corot=True):
        """Build MultiMNIST images from an IDX3 image file.

        Each output overlays image ``i`` (top-left) with a randomly chosen
        image ``extension[i]`` (offset toward the bottom-right), with
        optional random rotation of one or both digits, then resizes the
        36x36 composite back to the original resolution.

        Returns:
            (images, extension): a float64 tensor of shape
            ``(N, num_rows, num_cols)`` and the int32 permutation pairing
            left and right digits.
        """
        with open(path, 'rb') as f:
            data = f.read()
        assert MultiMNIST.get_int(data[:4]) == 2051  # IDX3 magic number
        length = MultiMNIST.get_int(data[4:8])
        num_rows = MultiMNIST.get_int(data[8:12])
        num_cols = MultiMNIST.get_int(data[12:16])
        images = np.frombuffer(data, dtype=np.uint8, offset=16).reshape(length, num_rows, num_cols)
        multi_data = np.zeros((length, num_rows, num_cols))
        extension = np.zeros(length, dtype=np.int32)
        rights = np.random.permutation(length)
        for left in range(length):
            extension[left] = rights[left]
            lim = images[left, :, :]
            rim = images[rights[left], :, :]
            if not rot_range[0] == rot_range[1] == 0:
                if corot:
                    # Rotate both digits by the same random angle.
                    rot_deg = random.randint(rot_range[0], rot_range[1])
                    lim = ndimage.rotate(lim, rot_deg, reshape=False)
                    rim = ndimage.rotate(rim, rot_deg, reshape=False)
                else:
                    # Independent random angles for left and right digits.
                    rot_deg = random.randint(rot_range[0], rot_range[1])
                    lim = ndimage.rotate(lim, rot_deg, reshape=False)
                    rot_deg = random.randint(rot_range[0], rot_range[1])
                    rim = ndimage.rotate(rim, rot_deg, reshape=False)
            # in case of 100% overlapping: the two shift branches are
            # asymmetric so the pair can never land on the same offset.
            shift_pix1 = shift_pix2 = 0
            if rand_shift:
                if random.choice([True, False]):
                    shift_pix1 = random.randint(0, shift_pix - 1)
                    shift_pix2 = random.randint(0, shift_pix)
                else:
                    shift_pix1 = random.randint(0, shift_pix)
                    shift_pix2 = random.randint(1, shift_pix)
            new_im = np.zeros((36, 36))
            new_im[shift_pix1:shift_pix1 + 28, shift_pix1:shift_pix1 + 28] += lim
            new_im[shift_pix2 + 4:shift_pix2 + 4 + 28, shift_pix2 + 4:shift_pix2 + 4 + 28] += rim
            new_im = np.clip(new_im, 0, 255)
            multi_data_im = np.array(Image.fromarray(new_im).resize((28, 28), resample=Image.NEAREST))
            multi_data[left, :, :] = multi_data_im
        return torch.from_numpy(multi_data).view(length, num_rows, num_cols), extension
| 8,297
| 42.904762
| 105
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/pareto/datasets/__init__.py
|
from .multi_mnist import MultiMNIST
| 36
| 17.5
| 35
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/submission/pretty_tabular.py
|
# Source code for ICML submission #640 "Efficient Continuous Pareto Exploration in Multi-Task Learning"
class PrettyTabular(object):
    """Render dict rows as aligned, fixed-width console table strings.

    ``head`` maps column name -> str.format spec (e.g. ``'{:4d}'``); the
    column width is the wider of the name and a formatted dummy value,
    plus two padding spaces.
    """

    def __init__(self, head):
        self.head = head

    def head_string(self):
        """Return the header row with each column name centered."""
        line = ''
        for key, value in self.head.items():
            # Probe the format spec with a digit; specs such as '{:s}'
            # reject ints, so fall back to a string dummy.  (Was a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
            try:
                dummy = value.format(0)  # Try digits.
            except Exception:
                dummy = value.format('0')  # Try strings.
            span = max(len(dummy), len(key)) + 2
            key_format = '{:^' + str(span) + '}'
            line += key_format.format(key)
        return line

    def row_string(self, row_data):
        """Return one data row, right-aligned under the header columns."""
        line = ''
        for key, value in self.head.items():
            data = value.format(row_data[key])
            span = max(len(key), len(data)) + 2
            line += ' ' * (span - len(data) - 1) + data + ' '
        return line
if __name__ == '__main__':
    # Smoke test: print 20 rows of random data, re-printing the header
    # every 10 rows.
    # head[name] = (format).
    head = { 'iter': '{:4d}', 'objective': '{:3.6e}', 'violations': '{:3.6e}' }
    tabular = PrettyTabular(head)
    import numpy as np
    from common import *  # provides print_info (colored console output)
    for i in range(20):
        if i % 10 == 0:
            print_info(tabular.head_string())
        row_data = { 'iter': i, 'objective': np.random.rand(), 'violations': np.random.rand() }
        print(tabular.row_string(row_data))
| 1,325
| 33.894737
| 103
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/submission/min_norm_solver.py
|
import sys
from itertools import combinations
import numpy as np
import torch
def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
d is the distance (objective) optimzed
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = (v2v2 - v1v2) / (v1v1 + v2v2 - 2 * v1v2)
# v2v2 - gamm * gamma * (v1 - v2)^2
# cost = v2v2 - gamma * gamma * (v1v1 + v2v2 - 2 * v1v2)
# = v2v2 - gamma * (v2v2 - v1v2)
cost = v2v2 + gamma * (v1v2 - v2v2)
return gamma, cost
def _min_norm_2d(vecs):
    """Best two-point min-norm combination over all pairs of rows of *vecs*.

    Scans every pair (i, j), solves the 1-D problem analytically, and keeps
    the pair with the smallest cost (exact only in 2-D).  Returns
    ``([(i, j), gamma, cost], gram)`` where gram is the full Gram matrix of
    *vecs* as a numpy array.
    """
    gram = vecs.matmul(vecs.t()).cpu().numpy()
    best = None
    best_cost = None
    for i, j in combinations(range(len(vecs)), 2):
        gamma, cost = _min_norm_element_from2(gram[i, i], gram[i, j], gram[j, j])
        if best_cost is None or cost <= best_cost:
            best_cost = cost
            best = [(i, j), gamma, cost]
    return best, gram
def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0) / m
for i in range(m - 1):
tmpsum += sorted_y[i]
tmax = (tmpsum - 1) / (i + 1.0)
if tmax > sorted_y[i + 1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
def _next_point(cur_val, grad, n):
    """One projected-gradient step from *cur_val* along *grad*.

    Projects the gradient onto the sum-zero subspace, picks the largest
    step that keeps every coordinate inside [0, 1], and re-projects the
    result onto the simplex.
    """
    proj_grad = grad - (np.sum(grad) / n)
    neg = proj_grad < 0
    pos = proj_grad > 0
    # Step lengths at which a coordinate would hit 0 (neg) or 1 (pos).
    to_zero = -cur_val[neg] / proj_grad[neg]
    to_one = (1.0 - cur_val[pos]) / (proj_grad[pos])
    t = 1
    feasible_zero = to_zero[to_zero > 1e-7]
    if len(feasible_zero) > 0:
        t = np.min(feasible_zero)
    feasible_one = to_one[to_one > 1e-7]
    if len(feasible_one) > 0:
        t = min(t, np.min(feasible_one))
    return _projection2simplex(proj_grad * t + cur_val)
def find_min_norm_element(vecs, max_iter=250, stop_crit=1e-5):
    """
    Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
    as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
    It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j;
    the solution lies in (0, d_{i,j})
    Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence

    Returns (sol_vec, cost): simplex weights and the attained squared norm.
    """
    # Solution lying at the combination of two points
    init_sol, dps = _min_norm_2d(vecs.detach())
    n = len(vecs)
    sol_vec = np.zeros(n)
    sol_vec[init_sol[0][0]] = init_sol[1]
    sol_vec[init_sol[0][1]] = 1 - init_sol[1]
    if n < 3:
        # This is optimal for n=2, so return the solution
        return sol_vec, init_sol[2]
    iter_count = 0
    while iter_count < max_iter:
        # Bug fix: the counter was never incremented, so max_iter had no
        # effect and the loop only exited via the convergence check.
        iter_count += 1
        grad_dir = -1.0 * np.dot(dps, sol_vec)
        new_point = _next_point(sol_vec, grad_dir, n)
        # Re-compute the inner products for the 1-D line search between the
        # current solution (v1) and the candidate point (v2).
        v1v1 = 0.0
        v1v2 = 0.0
        v2v2 = 0.0
        for i in range(n):
            for j in range(n):
                v1v1 += sol_vec[i] * sol_vec[j] * dps[i, j]
                v1v2 += sol_vec[i] * new_point[j] * dps[i, j]
                v2v2 += new_point[i] * new_point[j] * dps[i, j]
        nc, nd = _min_norm_element_from2(v1v1, v1v2, v2v2)
        new_sol_vec = nc * sol_vec + (1 - nc) * new_point
        change = new_sol_vec - sol_vec
        if np.sum(np.abs(change)) < stop_crit:
            break
        sol_vec = new_sol_vec
    return sol_vec, nd
if __name__ == '__main__':
    # Sanity check: compare the analytic two-vector solver against a convex
    # solver (cvxpy) on a random instance; the two printed losses should match.
    import numpy as np
    import cvxpy as cp
    n = 10
    v1 = np.random.normal(size=n)
    v2 = np.random.normal(size=n)
    v1v1 = v1.dot(v1)
    v1v2 = v1.dot(v2)
    v2v2 = v2.dot(v2)
    # min \|c * x1 + (1 - c) * x2\|^2.
    # Ground truth.
    alpha = cp.Variable(2)
    V = np.array([v1, v2])  # V: 2 * n.
    objective = cp.Minimize(cp.sum_squares(V.T @ alpha))
    constraints = [alpha >= 0, cp.sum(alpha) == 1]
    prob = cp.Problem(objective, constraints)
    loss = prob.solve()
    # Analytic solution under test.
    gamma, cost = _min_norm_element_from2(v1v1, v1v2, v2v2)
    print('loss:', loss, 'alpha:', alpha.value)
    print('loss:', cost, 'alpha:', [gamma, 1 - gamma])
| 4,675
| 29.966887
| 109
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/submission/common.py
|
# Source code for ICML submission #640 "Efficient Continuous Pareto Exploration in Multi-Task Learning"
import numpy as np
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
def print_error(*message):
    """Print *message* in red, then abort by raising RuntimeError."""
    print('\033[91m', 'ERROR ', *message, '\033[0m')
    raise RuntimeError


def print_ok(*message):
    """Print *message* in green."""
    args = ('\033[92m',) + message + ('\033[0m',)
    print(*args)


def print_warning(*message):
    """Print *message* in yellow."""
    args = ('\033[93m',) + message + ('\033[0m',)
    print(*args)


def print_info(*message):
    """Print *message* in cyan."""
    args = ('\033[96m',) + message + ('\033[0m',)
    print(*args)


def ndarray(x):
    """Coerce *x* into a float64 numpy array."""
    return np.asarray(x, dtype=np.float64)
# f: R^n -> R.
# grad: R^n -> R^n.
def check_grad(f, grad, x0, options=None):
    """Verify an analytic gradient against central finite differences.

    Args:
        f: scalar objective, R^n -> R.
        grad: analytic gradient of f, R^n -> R^n.
        x0: float numpy array at which to check.
        options: optional dict with 'eps' (FD step, 1e-6), 'atol' (1e-6)
            and 'rtol' (1e-4).  Was a mutable default dict argument.
    """
    options = {} if options is None else options
    eps = options.get('eps', 1e-6)
    atol = options.get('atol', 1e-6)
    rtol = options.get('rtol', 1e-4)
    analytic_g = grad(x0)
    n = x0.size
    for i in range(n):
        # Central difference along coordinate i.
        x0_pos = np.copy(x0)
        x0_pos[i] += eps
        f_pos = f(x0_pos)
        x0_neg = np.copy(x0)
        x0_neg[i] -= eps
        f_neg = f(x0_neg)
        numeric_g = (f_pos - f_neg) / 2 / eps
        assert np.isclose(numeric_g, analytic_g[i], atol=atol, rtol=rtol), \
            print_error('at x[{}]: {}, {}'.format(i, numeric_g, analytic_g[i]))
# f: R^n -> R.
# grad: R^n -> R^n.
# hess: R^n -> R^{n x n}.
def check_hess(f, grad, hess, x0, options=None):
    """Verify an analytic Hessian against central differences of the gradient.

    Args:
        f: scalar objective, R^n -> R (unused directly; kept for a
            signature parallel to check_grad).
        grad: analytic gradient, R^n -> R^n.
        hess: analytic Hessian, R^n -> R^{n x n}.
        x0: float numpy array at which to check.
        options: optional dict with 'eps' (FD step, 1e-6), 'atol' (1e-6)
            and 'rtol' (1e-4).  Was a mutable default dict argument.
    """
    options = {} if options is None else options
    eps = options.get('eps', 1e-6)
    atol = options.get('atol', 1e-6)
    rtol = options.get('rtol', 1e-4)
    analytic_h = hess(x0)
    n = x0.size
    for i in range(n):
        # Central difference of the gradient along coordinate i gives the
        # i-th row of the Hessian.
        x0_pos = np.copy(x0)
        x0_pos[i] += eps
        x0_neg = np.copy(x0)
        x0_neg[i] -= eps
        g_pos = grad(x0_pos)
        g_neg = grad(x0_neg)
        numeric_h = (g_pos - g_neg) / 2 / eps
        assert np.allclose(numeric_h, analytic_h[i], atol=atol, rtol=rtol), \
            print_error('at x[{}]: {}, {}'.format(i, numeric_h, analytic_h[i]))
# True if x is dominated by y: y <= x and y != x.
def dominated(x, y, atol=1e-8):
    """Return whether *y* Pareto-dominates *x* (minimization convention).

    Differences within *atol* of zero are treated as ties.
    """
    diff = x - y
    ties = np.isclose(diff, 0, atol=atol)
    diff[ties] = 0
    no_worse_everywhere = np.min(diff) >= 0
    strictly_better_somewhere = np.max(diff) > 0
    return no_worse_everywhere and strictly_better_somewhere
# Pareto stationary points -> pareto optimal points.
# xs: k x n matrix, i.e., k n-dimensional points.
# fs: k x m matrix, i.e., k m-dimensional f(points).
def filter_pareto_stationary_points(xs, fs, atol=1e-8):
    """Keep only the points whose objective vector is non-dominated.

    Returns the filtered (xs, fs) as a pair of numpy arrays.
    """
    xs = np.asarray(xs)
    fs = np.asarray(fs)
    assert len(xs.shape) == 2 and len(fs.shape) == 2
    assert xs.shape[0] == fs.shape[0]
    keep_x = []
    keep_f = []
    for x, f in zip(xs, fs):
        # A point survives iff no other objective vector dominates it.
        if not np.any([dominated(f, other, atol) for other in fs]):
            keep_x.append(x)
            keep_f.append(f)
    return np.asarray(keep_x), np.asarray(keep_f)
def compute_hypervolume(fs, ref_point):
    """2-D hypervolume of the front *fs* w.r.t. *ref_point* (minimization).

    Points are swept by increasing first objective; each contributes the
    rectangle between it, the previous point's second objective, and the
    reference point.  Only the 2-objective case is implemented.
    """
    fs = ndarray(fs)
    if fs.size == 0:
        return 0
    assert len(fs.shape) == 2 and fs.shape[1] == 2, print_error('>2 dimensional cases are not implemented yet.')
    # Sweep in order of increasing f1.
    order = np.argsort(fs[:, 0])
    hv = 0.0
    prev_f2 = ref_point[1]
    for f1, f2 in fs[order]:
        hv += (ref_point[0] - f1) * (prev_f2 - f2)
        prev_f2 = f2
    return hv
# Drawing functions.
# Fancy 3d arrow drawing.
class Arrow3D(FancyArrowPatch):
    """A FancyArrowPatch whose endpoints live in 3-D axes.

    The 3-D endpoints are stored and projected to 2-D display coordinates
    at draw time, so the arrow follows camera rotation.
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # The (0,0)-(0,0) positions are placeholders; the real positions
        # are computed from the view matrix inside draw().
        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
        self._verts3d = xs, ys, zs
    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        # Project stored 3-D endpoints with the renderer's view matrix M.
        # NOTE(review): renderer.M was removed in newer matplotlib (3.5+);
        # confirm the pinned matplotlib version before reuse.
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
def draw_arrow_3d(ax, head, tail, color, label=None):
    """Add a thick 3-D arrow from *tail* to *head* to axes *ax*.

    NOTE(review): the components are permuted (axis x <- index 1,
    y <- index 2, z <- index 0); presumably this matches an axis
    relabeling used by the plotting callers — confirm before reuse.
    """
    arrow = Arrow3D([tail[1], head[1]], [tail[2], head[2]], [tail[0], head[0]],
                    mutation_scale=24, lw=8, arrowstyle='-|>', color=color, label=label)
    ax.add_artist(arrow)
def draw_arrow_2d(ax, head, tail, color, thickness, head_length, padding, label=None):
    """Draw a padded 2-D arrow from *tail* to *head* on axes *ax*.

    The arrow is shortened by *padding* at both ends (plus the head
    length); nothing is drawn when the segment is too short to fit.
    """
    direction = head - tail
    length = np.linalg.norm(direction)
    if length < padding * 2 + head_length:
        return
    unit = direction / length
    start = tail + unit * padding
    end = tail + unit * (length - padding - head_length)
    delta = end - start
    ax.arrow(start[0], start[1], delta[0], delta[1],
             width=thickness, head_length=head_length, fc=color, ec=color, label=label, alpha=0.5)
| 4,513
| 35.112
| 116
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/submission/zdt2_variant.py
|
# Source code for ICML submission #640 "Efficient Continuous Pareto Exploration in Multi-Task Learning"
import numpy as np
from common import *
class Zdt2Variant(object):
    """A smooth reparameterization of the ZDT2 bi-objective benchmark.

    The raw variables are remapped through sin/cos (``__remap``) before the
    standard ZDT2 objectives are evaluated, which makes the Pareto set a
    cylinder in the original coordinates.  Analytic gradients, Hessians and
    Hessian-vector products of the composition are provided, along with
    evaluation counters and plotting helpers.
    """
    def __init__(self):
        self.n = 3  # decision dimension
        self.m = 2  # number of objectives
        # Evaluation counters for benchmarking solver cost.
        self.eval_f_cnt = 0
        self.eval_grad_cnt = 0
        self.eval_hvp_cnt = 0
    def reset_count(self):
        """Zero all evaluation counters."""
        self.eval_f_cnt = 0
        self.eval_grad_cnt = 0
        self.eval_hvp_cnt = 0
    def __remap(self, x):
        """Map raw x in R^3 into [0, 1]^3 via sin/cos squashing."""
        x = ndarray(x).ravel()
        assert x.size == self.n
        x2 = np.zeros(self.n)
        x2[0] = np.sin(x[0] + x[1] ** 2 + x[2] ** 2) * 0.5 + 0.5
        s = np.sum(x[1:] ** 2)
        x2[1:] = 0.5 * np.cos(s) + 0.5
        return x2
    def __remap_grad(self, x):
        """Jacobian of __remap; row i is d(x2[i])/dx."""
        x = ndarray(x).ravel()
        assert x.size == self.n
        jac = np.zeros((self.n, self.n))
        jac[0] = 0.5 * np.cos(x[0] + x[1] ** 2 + x[2] ** 2) * ndarray([1, 2 * x[1], 2 * x[2]])
        s = np.sum(x[1:] ** 2)
        g_s = np.zeros(self.n)
        g_s[1:] = 2 * x[1:]
        jac[1:] = -0.5 * np.sin(s) * g_s
        return jac
    def __remap_hess(self, x):
        """Second derivatives of __remap; hess[i] is the Hessian of x2[i]."""
        x = ndarray(x).ravel()
        assert x.size == self.n
        hess = np.zeros((self.n, self.n, self.n))
        s = np.sum(x[1:] ** 2)
        # Hessian of x2[0] = 0.5*sin(x0 + s) + 0.5.
        hess[0, 0, 0] = 0.5 * -np.sin(x[0] + s)
        hess[0, 0, 1:] = -np.sin(x[0] + s) * x[1:]
        hess[0, 1:, 0] = hess[0, 0, 1:]
        for i in range(1, self.n):
            hess[0, i, i] = np.cos(x[0] + s) + -np.sin(x[0] + s) * 2 * x[i] ** 2
        for i in range(1, self.n):
            for j in range(i + 1, self.n):
                hess[0, i, j] = hess[0, j, i] = x[i] * -np.sin(x[0] + s) * 2 * x[j]
        # Hessians of x2[1:] = 0.5*cos(s) + 0.5 (identical for all tail rows).
        g_s = np.zeros(self.n)
        g_s[1:] = 2 * x[1:]
        for i in range(1, self.n):
            hess[1:, :, i] = -0.5 * np.cos(s) * g_s[i] * g_s
            hess[1:, i, i] += -0.5 * np.sin(s) * 2
        return hess
    def f(self, x):
        """Evaluate both objectives at raw point x (counts the call)."""
        self.eval_f_cnt += 1
        return self.__f(self.__remap(x))
    def __f(self, x):
        """Standard ZDT2 objectives on the remapped point."""
        x = ndarray(x).ravel()
        assert x.size == self.n
        f1 = x[0]
        g = 1 + 9 / (self.n - 1) * np.sum(x[1:])
        f2 = g * (1 - (x[0] / g) ** 2)
        return ndarray([f1, f2])
    def grad(self, x):
        """Gradients of both objectives w.r.t. raw x, via the chain rule."""
        self.eval_grad_cnt += 1
        x_new = self.__remap(x)
        grad_x_new = self.__remap_grad(x)
        g1, g2 = self.__grad(x_new)
        return ndarray([g1.T @ grad_x_new, g2.T @ grad_x_new])
    def __grad(self, x):
        """Gradients of the ZDT2 objectives w.r.t. the remapped point."""
        x = ndarray(x).ravel()
        assert x.size == self.n
        g1 = np.zeros(self.n)
        g1[0] = 1
        grad_g = np.zeros(self.n)
        grad_g[1:] = 9 / (self.n - 1)
        g = 1 + 9 / (self.n - 1) * np.sum(x[1:])
        g2 = grad_g * (1 - (x[0] / g) ** 2)
        g2[0] += -2 * x[0] / g
        g2[1:] += 2 * (x[0] / g) ** 2 * grad_g[1:]
        return ndarray([g1, g2])
    def hess(self, x):
        """Hessians of both objectives w.r.t. raw x, via the chain rule."""
        x_new = self.__remap(x)
        g1, g2 = self.__grad(x_new)
        h1, h2 = self.__hess(x_new)
        g_remap = self.__remap_grad(x)
        h_remap = self.__remap_hess(x)
        # f(u, v), u = g(x1, x2), v = g(x1, x2).
        # df/dx1 = df/du * du/dx1 + df/dv * dv/dx1 = g1.dot(g_remap[:, 0])
        # ddf/dx1dx2 = (h1 @ g_remap[:, 1]).dot(g_remap[:, 0]) + g1.dot(h_remap[:, 0, 1])
        h1_remap = g_remap.T @ (h1 @ g_remap)
        h2_remap = g_remap.T @ (h2 @ g_remap)
        for i in range(self.n):
            h1_remap[i] += g1.T @ h_remap[:, i, :]
            h2_remap[i] += g2.T @ h_remap[:, i, :]
        return ndarray([h1_remap, h2_remap])
    def __hess(self, x):
        """Hessians of the ZDT2 objectives w.r.t. the remapped point.

        f1 is linear, so h1 stays zero.
        """
        x = ndarray(x).ravel()
        assert x.size == self.n
        h1 = np.zeros((self.n, self.n))
        h2 = np.zeros((self.n, self.n))
        g = 1 + 9 / (self.n - 1) * np.sum(x[1:])
        grad_g = np.zeros(self.n)
        grad_g[1:] = 9 / (self.n - 1)
        # g2[0] = -2 * x[0] / g
        h2[0, 0] = -2 / g
        h2[0, 1:] = 18 * x[0] / g / g / (self.n - 1)
        # g2[1] = 9 / (n - 1) * (1 + (x[0] / g) ** 2)
        h2[1:, 0] = 18 * x[0] / g / g / (self.n - 1)
        h2[1:, 1:] = -2 / g * (9 / (self.n - 1) * x[0] / g) ** 2
        return ndarray([h1, h2])
    def hvp(self, x, alpha, v):
        """Hessian-vector product of the alpha-weighted objectives with v."""
        self.eval_hvp_cnt += 1
        h1, h2 = self.hess(x)
        alpha = ndarray(alpha).ravel()
        assert alpha.size == self.m
        v = ndarray(v).ravel()
        assert v.size == self.n
        return ndarray(alpha[0] * h1 @ v + alpha[1] * h2 @ v)
    def sample_pareto_set(self):
        """Draw a random point from the analytic Pareto set (a cylinder)."""
        x = np.zeros(self.n)
        x[0] = np.random.uniform(-np.pi / 2, np.pi / 2) - np.pi
        theta = np.random.uniform(-np.pi, np.pi)
        c, s = np.cos(theta), np.sin(theta)
        x[1] = np.sqrt(np.pi) * c
        x[2] = np.sqrt(np.pi) * s
        return ndarray(x)
    def plot_pareto_set(self, ax):
        """Plot the analytic Pareto set as a translucent cylinder on 3-D *ax*."""
        x1_low, x1_high = -np.pi / 2 - np.pi, np.pi / 2 - np.pi
        r = np.sqrt(np.pi)
        theta = np.linspace(-np.pi, np.pi, 33)
        X2, X3 = r * np.cos(theta), r * np.sin(theta)
        X1 = np.outer(np.linspace(x1_low, x1_high, 9), np.ones(theta.size))
        face_color = np.zeros((X1.shape[0], X1.shape[1], 3))
        face_color[:] = [0.85, 0.93, 0.92]
        ax.plot_surface(X2, X3, X1, alpha=0.25, facecolors=face_color)
        ax.set_xlim([-2 * r, 2 * r])
        ax.set_ylim([-2 * r, 2 * r])
        ax.set_zlim([x1_low, x1_high])
        ax.set_xlabel('$x_2$')
        ax.set_ylabel('$x_3$')
        ax.set_zlabel('$x_1$')
    def plot_pareto_front(self, ax, label='Pareto front'):
        """Plot the analytic Pareto front f2 = 1 - f1^2 on 2-D *ax*."""
        # Analytic Pareto front.
        f1 = np.linspace(0.0, 1.0, 101)
        f2 = 1 - f1 ** 2
        if label is None:
            ax.plot(f1, f2, 'k-.')
        else:
            ax.plot(f1, f2, 'k-.', label=label)
        ax.set_xlabel('$f_1$')
        ax.set_ylabel('$f_2$')
        ax.set_xlim([-0.05, 1.05])
        ax.set_ylim([-0.05, 1.05])
        ax.set_xticks(np.linspace(0, 1, 6))
        ax.set_yticks(np.linspace(0, 1, 6))
        ax.set_aspect('equal')
        ax.grid(True)
if __name__ == '__main__':
    # Check gradients: verify the analytic gradient and Hessian of each
    # objective against finite differences at a random point.
    problem = Zdt2Variant()
    n, m = 3, 2
    x0 = np.random.normal(size=n)
    for i in range(m):
        f = lambda x: problem.f(x)[i]
        g = lambda x: problem.grad(x)[i]
        check_grad(f, g, x0)
        h = lambda x : problem.hess(x)[i]
        check_hess(f, g, h, x0)
    # Check Pareto front: visualize the analytic front (2-D) and the
    # Pareto set (3-D cylinder).
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    fig, ax = plt.subplots(1, 1)
    problem.plot_pareto_front(ax)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    problem.plot_pareto_set(ax)
    plt.show()
    plt.close()
| 6,675
| 30.790476
| 103
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/multi_mnist/weighted_sum.py
|
import random
from pathlib import Path
from termcolor import colored
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torchvision import transforms
from pareto.metrics import topk_accuracy
from pareto.datasets import MultiMNIST
from pareto.networks import MultiLeNet
from pareto.utils import evenly_dist_weights
@torch.no_grad()
def evaluate(network, dataloader, device, closures, header=''):
    """Evaluate per-task losses and top-1 accuracies over *dataloader*.

    Args:
        network: two-headed model; network(images) returns one logits
            tensor per task.
        dataloader: yields (images, labels) with labels of shape (B, 2).
        device: torch device to run on.
        closures: one loss callable per task, called as c(network, logits, labels).
        header: optional prefix for the printed summary line.

    Returns:
        (losses, top1s): two numpy arrays of length 2, averaged over all
        samples (batch sums divided by the total sample count).
    """
    num_samples = 0
    losses = np.zeros(2)
    top1s = np.zeros(2)
    network.train(False)  # eval mode
    for images, labels in dataloader:
        batch_size = len(images)
        num_samples += batch_size
        images = images.to(device)
        labels = labels.to(device)
        logits = network(images)
        # Accumulate sample-weighted sums; averaged after the loop.
        losses_batch = [c(network, logits, labels).item() for c in closures]
        losses += batch_size * np.array(losses_batch)
        top1s[0] += batch_size * topk_accuracy(logits[0], labels[:, 0], k=1)
        top1s[1] += batch_size * topk_accuracy(logits[1], labels[:, 1], k=1)
    losses /= num_samples
    top1s /= num_samples
    # Colored one-line summary, e.g. "hdr: loss [a/b] top@1 [x%/y%]".
    loss_msg = '[{}]'.format('/'.join([f'{loss:.6f}' for loss in losses]))
    top1_msg = '[{}]'.format('/'.join([f'{top1 * 100.0:.2f}%' for top1 in top1s]))
    msgs = [
        f'{header}:' if header else '',
        'loss', colored(loss_msg, 'yellow'),
        'top@1', colored(top1_msg, 'yellow')
    ]
    print(' '.join(msgs))
    return losses, top1s
def train(pref, ckpt_name):
    """Train one MultiLeNet on MultiMNIST with a fixed loss weighting.

    Optimizes the scalarized objective sum(pref[i] * loss_i) with SGD and
    cosine annealing, evaluating on the test split after every epoch, and
    saves the final checkpoint (with its preference vector and final
    metrics) as ``weighted_sum/<ckpt_name>.pth``.

    Args:
        pref: per-task loss weights (length 2).
        ckpt_name: checkpoint stem and log prefix.
    """
    # prepare hyper-parameters
    seed = 42
    cuda_enabled = True
    cuda_deterministic = False
    batch_size = 256
    num_workers = 2
    lr = 0.01
    momentum = 0.9
    weight_decay = 0.0
    num_epochs = 30
    # prepare path
    root_path = Path(__file__).resolve().parent
    dataset_path = root_path / 'MultiMNIST'
    ckpt_path = root_path / 'weighted_sum'
    root_path.mkdir(parents=True, exist_ok=True)
    dataset_path.mkdir(parents=True, exist_ok=True)
    ckpt_path.mkdir(parents=True, exist_ok=True)
    # fix random seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda_enabled and torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # prepare device
    if cuda_enabled and torch.cuda.is_available():
        import torch.backends.cudnn as cudnn
        device = torch.device('cuda')
        if cuda_deterministic:
            cudnn.benchmark = False
            cudnn.deterministic = True
        else:
            cudnn.benchmark = True
    else:
        device = torch.device('cpu')
    # prepare dataset
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    trainset = MultiMNIST(dataset_path, train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    testset = MultiMNIST(dataset_path, train=False, download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    # prepare network
    network = MultiLeNet()
    network.to(device)
    # prepare losses: one cross-entropy closure per digit head
    criterion = F.cross_entropy
    closures = [lambda n, l, t: criterion(l[0], t[:, 0]), lambda n, l, t: criterion(l[1], t[:, 1])]
    # prepare optimizer (scheduler steps per batch, hence epochs * steps)
    optimizer = SGD(network.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
    lr_scheduler = CosineAnnealingLR(optimizer, num_epochs * len(trainloader))
    # save initial state once, then reload it so every preference vector
    # starts from the same random initialization
    if not (ckpt_path / 'random.pth').is_file():
        random_ckpt = {
            'state_dict': network.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict()
        }
        torch.save(random_ckpt, ckpt_path / 'random.pth')
    random_ckpt = torch.load(ckpt_path / 'random.pth', map_location='cpu')
    network.load_state_dict(random_ckpt['state_dict'])
    optimizer.load_state_dict(random_ckpt['optimizer'])
    lr_scheduler.load_state_dict(random_ckpt['lr_scheduler'])
    # first evaluation
    evaluate(network, testloader, device, closures, f'{ckpt_name}')
    # training
    num_steps = len(trainloader)
    for epoch in range(1, num_epochs + 1):
        network.train(True)
        trainiter = iter(trainloader)
        for _ in range(1, num_steps + 1):
            images, labels = next(trainiter)
            images = images.to(device)
            labels = labels.to(device)
            logits = network(images)
            # weighted-sum scalarization of the per-task losses
            losses = [c(network, logits, labels) for c in closures]
            loss = sum(w * l for w, l in zip(pref, losses))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
        losses, tops = evaluate(network, testloader, device, closures, f'{ckpt_name}: {epoch}/{num_epochs}')
    # saving (final-epoch metrics travel with the checkpoint)
    ckpt = {
        'state_dict': network.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr_scheduler': lr_scheduler.state_dict(),
        'preference': pref,
    }
    record = {'losses': losses, 'tops': tops}
    ckpt['record'] = record
    torch.save(ckpt, ckpt_path / f'{ckpt_name}.pth')
def weighted_sum(num_prefs=5):
    """Train one model per preference vector spread evenly over 2 tasks."""
    prefs = evenly_dist_weights(num_prefs + 2, 2)
    for idx, pref in enumerate(prefs):
        train(pref, str(idx))


if __name__ == '__main__':
    weighted_sum(5)
| 5,501
| 26.928934
| 117
|
py
|
ContinuousParetoMTL
|
ContinuousParetoMTL-master/multi_mnist/cpmtl.py
|
import random
from pathlib import Path
from termcolor import colored
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import SGD
from torchvision import transforms
from pareto.metrics import topk_accuracy
from pareto.optim import VisionHVPSolver, MINRESKKTSolver
from pareto.datasets import MultiMNIST
from pareto.networks import MultiLeNet
from pareto.utils import TopTrace
@torch.no_grad()
def evaluate(network, dataloader, device, closures, header=''):
    """Evaluate per-task losses and top-1 accuracies over *dataloader*.

    Duplicate of evaluate() in weighted_sum.py; kept in sync by hand.

    Args:
        network: two-headed model; network(images) returns one logits
            tensor per task.
        dataloader: yields (images, labels) with labels of shape (B, 2).
        device: torch device to run on.
        closures: one loss callable per task, called as c(network, logits, labels).
        header: optional prefix for the printed summary line.

    Returns:
        (losses, top1s): two numpy arrays of length 2, averaged over all
        samples (batch sums divided by the total sample count).
    """
    num_samples = 0
    losses = np.zeros(2)
    top1s = np.zeros(2)
    network.train(False)  # eval mode
    for images, labels in dataloader:
        batch_size = len(images)
        num_samples += batch_size
        images = images.to(device)
        labels = labels.to(device)
        logits = network(images)
        # Accumulate sample-weighted sums; averaged after the loop.
        losses_batch = [c(network, logits, labels).item() for c in closures]
        losses += batch_size * np.array(losses_batch)
        top1s[0] += batch_size * topk_accuracy(logits[0], labels[:, 0], k=1)
        top1s[1] += batch_size * topk_accuracy(logits[1], labels[:, 1], k=1)
    losses /= num_samples
    top1s /= num_samples
    # Colored one-line summary, e.g. "hdr: loss [a/b] top@1 [x%/y%]".
    loss_msg = '[{}]'.format('/'.join([f'{loss:.6f}' for loss in losses]))
    top1_msg = '[{}]'.format('/'.join([f'{top1 * 100.0:.2f}%' for top1 in top1s]))
    msgs = [
        f'{header}:' if header else '',
        'loss', colored(loss_msg, 'yellow'),
        'top@1', colored(top1_msg, 'yellow')
    ]
    print(' '.join(msgs))
    return losses, top1s
def train(start_path, beta):
    """Continue one pretrained Pareto solution along direction *beta*.

    Loads the weighted-sum checkpoint at *start_path* and runs a fixed
    number of continuous-Pareto (MINRES KKT) expansion steps, evaluating
    after each step and saving one checkpoint per step under
    ``cpmtl/<start-name>/<step>.pth``.

    Args:
        start_path: Path to the starting checkpoint (.pth).
        beta: expansion direction over the objectives (torch tensor).

    Raises:
        RuntimeError: when *start_path* does not exist.
    """
    # prepare hyper-parameters
    seed = 42
    cuda_enabled = True
    cuda_deterministic = False
    batch_size = 2048
    num_workers = 2
    shared = False
    stochastic = False
    kkt_momentum = 0.0
    create_graph = False
    grad_correction = False
    shift = 0.0
    tol = 1e-5
    damping = 0.1
    maxiter = 50
    lr = 0.1
    momentum = 0.0
    weight_decay = 0.0
    num_steps = 10
    verbose = False
    # prepare path
    ckpt_name = start_path.name.split('.')[0]
    root_path = Path(__file__).resolve().parent
    dataset_path = root_path / 'MultiMNIST'
    ckpt_path = root_path / 'cpmtl' / ckpt_name
    if not start_path.is_file():
        raise RuntimeError('Pareto solutions not found.')
    root_path.mkdir(parents=True, exist_ok=True)
    dataset_path.mkdir(parents=True, exist_ok=True)
    ckpt_path.mkdir(parents=True, exist_ok=True)
    # fix random seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda_enabled and torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # prepare device
    if cuda_enabled and torch.cuda.is_available():
        import torch.backends.cudnn as cudnn
        device = torch.device('cuda')
        if cuda_deterministic:
            cudnn.benchmark = False
            cudnn.deterministic = True
        else:
            cudnn.benchmark = True
    else:
        device = torch.device('cpu')
    # prepare dataset
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    trainset = MultiMNIST(dataset_path, train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    testset = MultiMNIST(dataset_path, train=False, download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    # prepare network
    network = MultiLeNet()
    network.to(device)
    # initialize network from the weighted-sum Pareto solution
    start_ckpt = torch.load(start_path, map_location='cpu')
    network.load_state_dict(start_ckpt['state_dict'])
    # prepare losses: one cross-entropy closure per digit head
    criterion = F.cross_entropy
    closures = [lambda n, l, t: criterion(l[0], t[:, 0]), lambda n, l, t: criterion(l[1], t[:, 1])]
    # prepare HVP solver (Hessian-vector products over the train set)
    hvp_solver = VisionHVPSolver(network, device, trainloader, closures, shared=shared)
    hvp_solver.set_grad(batch=False)
    hvp_solver.set_hess(batch=True)
    # prepare KKT solver (computes the expansion direction via MINRES)
    kkt_solver = MINRESKKTSolver(
        network, hvp_solver, device,
        stochastic=stochastic, kkt_momentum=kkt_momentum, create_graph=create_graph,
        grad_correction=grad_correction, shift=shift, tol=tol, damping=damping, maxiter=maxiter)
    # prepare optimizer (plain SGD applies the KKT direction)
    optimizer = SGD(network.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
    # first evaluation
    losses, tops = evaluate(network, testloader, device, closures, f'{ckpt_name}')
    # prepare utilities
    top_trace = TopTrace(len(closures))
    top_trace.print(tops, show=False)
    beta = beta.to(device)
    # training: each step writes gradients via the KKT solver, applies
    # them with SGD, evaluates, and checkpoints the result
    for step in range(1, num_steps + 1):
        network.train(True)
        optimizer.zero_grad()
        kkt_solver.backward(beta, verbose=verbose)
        optimizer.step()
        losses, tops = evaluate(network, testloader, device, closures, f'{ckpt_name}: {step}/{num_steps}')
        top_trace.print(tops)
        ckpt = {
            'state_dict': network.state_dict(),
            'optimizer': optimizer.state_dict(),
            'beta': beta,
        }
        record = {'losses': losses, 'tops': tops}
        ckpt['record'] = record
        torch.save(ckpt, ckpt_path / f'{step:d}.pth')
    hvp_solver.close()
def cpmtl():
    """Expand every weighted-sum checkpoint along the fixed direction beta."""
    start_root = Path(__file__).resolve().parent / 'weighted_sum'
    beta = torch.Tensor([1, 0])
    # Process checkpoints in numeric order of their stem (0.pth, 1.pth, ...).
    checkpoints = sorted(start_root.glob('[0-9]*.pth'),
                         key=lambda p: int(p.name.split('.')[0]))
    for start_path in checkpoints:
        train(start_path, beta)


if __name__ == "__main__":
    cpmtl()
| 5,661
| 25.092166
| 117
|
py
|
DeepAA
|
DeepAA-master/resnet_imagenet.py
|
import os
import tensorflow as tf
# ref: https://github.com/gahaalt/resnets-in-tensorflow2/blob/master/Models/Resnets.py
_bn_momentum = 0.9
def regularized_padded_conv(*args, **kwargs):
    """Conv2D preset: 'same' padding, He-normal init, L2-regularized, no bias."""
    defaults = dict(padding='same',
                    kernel_regularizer=_regularizer,
                    bias_regularizer=_regularizer,
                    kernel_initializer='he_normal',
                    use_bias=False)
    return tf.keras.layers.Conv2D(*args, **kwargs, **defaults)
def bn_relu(x, gamma_initializer='ones'):
    """Synchronized batch norm followed by ReLU."""
    normed = tf.keras.layers.experimental.SyncBatchNormalization(
        momentum=_bn_momentum, gamma_initializer=gamma_initializer)(x)
    return tf.keras.layers.ReLU()(normed)
def shortcut(x, filters, stride, mode):
    """Adapt the residual path to `filters` channels.

    mode 'B'          : 1x1 conv projection.
    mode 'B_original' : 1x1 conv projection + batch norm.
    mode 'A'          : parameter-free -- spatial subsample then zero-pad channels.
    Identity when the channel count already matches.
    """
    if x.shape[-1] == filters:  # maybe and stride==1
        return x
    if mode == 'B':
        return regularized_padded_conv(filters, 1, strides=stride)(x)
    if mode == 'B_original':
        projected = regularized_padded_conv(filters, 1, strides=stride)(x)
        return tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum)(projected)
    if mode == 'A':
        pooled = tf.keras.layers.MaxPool2D(1, stride)(x) if stride > 1 else x
        return tf.pad(pooled, paddings=[(0, 0), (0, 0), (0, 0), (0, filters - x.shape[-1])])
    raise KeyError("Parameter shortcut_type not recognized!")
def original_block(x, filters, stride=1, **kwargs):
    """Classic post-activation ResNet basic block: conv-bn-relu-conv-bn plus shortcut."""
    out = regularized_padded_conv(filters, 3, strides=stride)(x)
    out = regularized_padded_conv(filters, 3)(bn_relu(out))
    out = tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum)(out)
    # 'B' shortcuts in the original formulation also carry a batch norm
    mode = 'B_original' if _shortcut_type == 'B' else _shortcut_type
    skip = shortcut(x, filters, stride, mode=mode)
    return tf.keras.layers.ReLU()(skip + out)
def bootleneck_block(x, filters, stride=1, preact_block=False):  # preact_block is accepted but unused here
    """Post-activation bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand + shortcut.

    The final BN uses a zero gamma init so the block starts as (near) identity.
    """
    # flow = bn_relu(x)
    # if preact_block:
    #     x = flow
    residual = x
    c1 = regularized_padded_conv(filters // _bootleneck_width, 1)(bn_relu(x))
    c2 = regularized_padded_conv(filters // _bootleneck_width, 3, strides=stride)(bn_relu(c1))
    c3 = regularized_padded_conv(filters, 1)(bn_relu(c2))
    # project the residual only when shape changes (channels or spatial stride)
    if x.shape[-1] != filters or stride != 1:
        residual = shortcut(x, filters, stride, mode=_shortcut_type)
    return tf.keras.layers.ReLU()(residual + tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum, gamma_initializer='zeros')(c3))
def group_of_blocks(x, block_type, num_blocks, filters, stride, block_idx=0):
    """Stack `num_blocks` residual blocks; only the first applies `stride`."""
    global _preact_shortcuts
    preact_block = False
    flow = block_type(x, filters, stride, preact_block=preact_block)
    for _ in range(num_blocks - 1):
        flow = block_type(flow, filters)
    return flow
def Resnet(input_shape, n_classes, l2_reg=1e-4, group_sizes=(2, 2, 2), features=(16, 32, 64), strides=(1, 2, 2),
           shortcut_type='B', block_type='preactivated', first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
           dropout=0, cardinality=1, bootleneck_width=4, preact_shortcuts=True):
    """Build a ResNet as a Keras functional model.

    NOTE: configuration is passed to the block functions through module-level
    globals (set below), so constructing two models with different settings is
    not safe to interleave.
    """
    global _regularizer, _shortcut_type, _preact_projection, _dropout, _cardinality, _bootleneck_width, _preact_shortcuts
    _bootleneck_width = bootleneck_width # used in ResNeXts and bootleneck blocks
    _regularizer = tf.keras.regularizers.l2(l2_reg)
    _shortcut_type = shortcut_type # used in blocks
    _cardinality = cardinality # used in ResNeXts
    _dropout = dropout # used in Wide ResNets
    _preact_shortcuts = preact_shortcuts
    block_types = {
        # 'preactivated': preactivation_block,
        'bootleneck': bootleneck_block,
        'original': original_block
    }
    selected_block = block_types[block_type]
    inputs = tf.keras.layers.Input(shape=input_shape)
    flow = regularized_padded_conv(**first_conv)(inputs)
    # if block_type == 'original':
    flow = bn_relu(flow)
    # ImageNet-style stem: 3x3 max pool after the first conv
    flow = tf.keras.layers.MaxPool2D(pool_size=(3,3), strides=2, padding='same')(flow)
    for block_idx, (group_size, feature, stride) in enumerate(zip(group_sizes, features, strides)):
        flow = group_of_blocks(flow,
                               block_type=selected_block,
                               num_blocks=group_size,
                               block_idx=block_idx,
                               filters=feature,
                               stride=stride)
    # if block_type != 'original':
    #     flow = bn_relu(flow)
    flow = tf.keras.layers.GlobalAveragePooling2D()(flow)
    outputs = tf.keras.layers.Dense(n_classes, kernel_regularizer=_regularizer, bias_regularizer=_regularizer, use_bias=True)(flow)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return model
def imagenet_resnet50(block_type='bootleneck', shortcut_type='B_original', l2_reg=0.5e-4, load_weights=False, input_shape=(224,224,3), n_classes=1000):
    """ResNet-50 for ImageNet: bottleneck blocks in stages of 3/4/6/3."""
    bootleneck_width = 4
    stage_filters = tuple(base * bootleneck_width for base in (64, 128, 256, 512))
    return Resnet(input_shape=input_shape, n_classes=n_classes, l2_reg=l2_reg,
                  group_sizes=(3, 4, 6, 3),
                  features=stage_filters,
                  strides=(1, 2, 2, 2),
                  first_conv={"filters": 64, "kernel_size": 7, "strides": 2},
                  shortcut_type=shortcut_type,
                  block_type=block_type, preact_shortcuts=False,
                  bootleneck_width=bootleneck_width)
def imagenet_resnet50_pretrained(input_shape, n_classes, l2_reg):
    """Frozen ImageNet-pretrained Keras ResNet50 backbone with a fresh dense head."""
    _regularizer = tf.keras.regularizers.l2(l2_reg)
    inputs = tf.keras.layers.Input(shape=input_shape)
    backbone = tf.keras.applications.resnet50.ResNet50(
        include_top=False, input_shape=input_shape, pooling='avg', weights='imagenet')
    backbone.trainable = False
    features = backbone(inputs, training=False)  # do not update batch augmentation
    logits = tf.keras.layers.Dense(n_classes, kernel_regularizer=_regularizer,
                                   bias_regularizer=_regularizer, use_bias=True)(features)
    return tf.keras.Model(inputs=inputs, outputs=logits)
def imagenet_resnet18(block_type='original', shortcut_type='B_original', l2_reg=0.5e-4, load_weights=False, input_shape=(224,224,3), n_classes=1000):
    """ResNet-18 for ImageNet: basic blocks in stages of 2/2/2/2."""
    return Resnet(input_shape=input_shape, n_classes=n_classes, l2_reg=l2_reg,
                  group_sizes=(2, 2, 2, 2),
                  features=(64, 128, 256, 512),
                  strides=(1, 2, 2, 2),
                  first_conv={"filters": 64, "kernel_size": 7, "strides": 2},
                  shortcut_type=shortcut_type,
                  block_type=block_type, preact_shortcuts=False,
                  bootleneck_width=None)
def load_weights_func(model, model_name):
    """Best-effort restore of weights from saved_models/<model_name>.tf; returns the model."""
    weight_path = os.path.join('saved_models', model_name + '.tf')
    try:
        model.load_weights(weight_path)
    except tf.errors.NotFoundError:
        print("No weights found for this model!")
    return model
# Smoke test: build the ImageNet ResNet-50 when this file is run as a script.
if __name__ == '__main__':
    model = imagenet_resnet50()
| 6,826
| 46.082759
| 151
|
py
|
DeepAA
|
DeepAA-master/lr_scheduler.py
|
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import LearningRateSchedule
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops, control_flow_ops
class GradualWarmup_Cosine_Scheduler(LearningRateSchedule):
    """Linear warmup from `starting_lr` to `initial_lr`, then cosine decay to `ending_lr`."""
    def __init__(self, starting_lr, initial_lr, ending_lr, warmup_steps, total_steps, name=None):
        super(GradualWarmup_Cosine_Scheduler, self).__init__()
        self.starting_lr = starting_lr
        self.initial_lr = initial_lr
        self.ending_lr = ending_lr
        self.warmup_steps = warmup_steps
        self.total_steps = total_steps
        self.name = name
    def __call__(self, step):
        """Return the learning rate for `step` (graph-safe via tf.cond)."""
        with ops.name_scope_v2(self.name or 'GradualWarmup_Cosine') as name:
            initial_lr = ops.convert_to_tensor_v2(self.initial_lr, name='initial_learning_rate')
            dtype = initial_lr.dtype
            # cast every schedule constant to the lr dtype so the graph ops type-check
            starting_lr = math_ops.cast(self.starting_lr, dtype)
            ending_lr = math_ops.cast(self.ending_lr, dtype)
            warmup_steps = math_ops.cast(self.warmup_steps, dtype)
            total_steps = math_ops.cast(self.total_steps, dtype)
            one = math_ops.cast(1.0, dtype)
            point5 = math_ops.cast(0.5, dtype)
            pi = math_ops.cast(3.1415926536, dtype)
            step = math_ops.cast(step, dtype)
            lr = tf.cond(step < warmup_steps,
                         true_fn=lambda: self._warmup_schedule(starting_lr, initial_lr, step, warmup_steps),
                         false_fn=lambda: self._cosine_annealing_schedule(initial_lr, ending_lr, step, warmup_steps, total_steps, pi,
                                                                          point5, one))
            return lr
    def _warmup_schedule(self, starting_lr, initial_lr, step, warmup_steps):
        """Linear interpolation from starting_lr to initial_lr over the warmup phase."""
        ratio = math_ops.divide(step, warmup_steps)
        lr = math_ops.add(starting_lr,
                          math_ops.multiply(initial_lr - starting_lr, ratio))
        return lr
    def _cosine_annealing_schedule(self, initial_lr, ending_lr, step, warmup_steps, total_steps, pi, point5, one):
        """Cosine decay: ending_lr + 0.5 * (initial_lr - ending_lr) * (1 + cos(pi * progress))."""
        ratio = math_ops.divide(step - warmup_steps, total_steps - warmup_steps)
        cosine_ratio_pi = math_ops.cos(math_ops.multiply(ratio, pi))
        second_part = math_ops.multiply(point5,
                                        math_ops.multiply(initial_lr - ending_lr,
                                                          one + cosine_ratio_pi))
        lr = math_ops.add(ending_lr, second_part)
        return lr
    def get_config(self):
        """Serialize constructor arguments (Keras schedule config protocol)."""
        return {
            'starting_lr': self.starting_lr,
            'initial_lr': self.initial_lr,
            'ending_lr': self.ending_lr,
            'warmup_steps': self.warmup_steps,
            'total_steps': self.total_steps,
            'name': self.name
        }
| 2,824
| 46.083333
| 133
|
py
|
DeepAA
|
DeepAA-master/DeepAA_utils.py
|
import os
import logging
import numpy as np
import copy
import random
import datetime
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
tf.get_logger().setLevel(logging.ERROR)
from data_generator import DataGenerator, DataAugmentation
from utils import CTLHistory
from lr_scheduler import GradualWarmup_Cosine_Scheduler
import resnet
from resnet_imagenet import imagenet_resnet50
from data_generator import get_cifar10_data, get_cifar100_data
from augmentation import AutoContrast, Invert, Equalize, Solarize, Posterize, Contrast, Brightness, Sharpness, \
Identity, Color, ShearX, ShearY, TranslateX, TranslateY, Rotate
from augmentation import RandCrop, RandCutout, RandFlip, RandCutout60
from augmentation import RandResizeCrop_imagenet, centerCrop_imagenet
from policy import DA_Policy_logits
from augmentation import IMAGENET_SIZE
import torch
import threading
import queue
from imagenet_data_utils import get_imagenet_split
def aug_op_cifar_list(): # operators and their ranges
    """CIFAR augmentation search space: (op, min_magnitude, max_magnitude) triples.

    Display names are built by splitting each tuple's repr on spaces -- fragile,
    but only used for logging / lookup keys.
    """
    l = [
        (Identity, 0., 1.0), # 0
        (ShearX, -0.3, 0.3), # 1
        (ShearY, -0.3, 0.3), # 2
        (TranslateX, -0.45, 0.45), # 3
        (TranslateY, -0.45, 0.45), # 4
        (Rotate, -30., 30.), # 5
        (AutoContrast, 0., 1.), # 6
        (Invert, 0., 1.), # 7
        (Equalize, 0., 1.), # 8
        (Solarize, 0., 256.), # 9
        (Posterize, 4., 8.), # 10,
        (Contrast, 0.1, 1.9), # 11
        (Color, 0.1, 1.9), # 12
        (Brightness, 0.1, 1.9), # 13
        (Sharpness, 0.1, 1.9), # 14
        (RandFlip, 0., 1.0), # 15
        (RandCutout, 0., 1.0), # 16
        (RandCrop, 0., 1.0), # 17
    ]
    names = []
    for op in l:
        # repr of the tuple looks like "(<function ShearX at 0x...>, -0.3, 0.3)"
        info = op.__str__().split(' ')
        name = '{}:({},{}'.format(info[1], info[-2], info[-1])
        names.append(name)
    return l, names
def aug_op_imagenet_list(): # operations and their ranges
    """ImageNet augmentation search space: (op, min_magnitude, max_magnitude) triples."""
    l = [
        (Identity, 0., 1.0), # 0
        (ShearX, -0.3, 0.3), # 1
        (ShearY, -0.3, 0.3), # 2
        (TranslateX, -0.45, 0.45), # 3
        (TranslateY, -0.45, 0.45), # 4
        (Rotate, -30., 30.), # 5
        (AutoContrast, 0., 1.), # 6
        (Invert, 0., 1.), # 7
        (Equalize, 0., 1.), # 8
        (Solarize, 0., 256.), # 9
        (Posterize, 4., 8.), # 10
        (Contrast, 0.1, 1.9), # 11
        (Color, 0.1, 1.9), # 12
        (Brightness, 0.1, 1.9), # 13
        (Sharpness, 0.1, 1.9), # 14
        (RandFlip, 0., 1.0), # 15
        (RandCutout60, 0., 1.0), # 16
        (RandResizeCrop_imagenet, 0., 1.),
    ]
    names = []
    for op in l:
        # repr of the tuple looks like "(<function Rotate at 0x...>, -30.0, 30.0)"
        info = op.__str__().split(' ')
        name = '{}:({},{}'.format(info[1], info[-2], info[-1])
        names.append(name)
    return l, names
# Get the model
def get_model(args, model, n_classes):
    """Instantiate the network named by `model`: 'WRN_28_10', 'WRN_40_2' or 'resnet50'."""
    if model == 'WRN_28_10':
        return resnet.cifar_WRN_28_10(dropout=0, l2_reg=0.00025,
                                      preact_shortcuts=False, n_classes=n_classes,
                                      input_shape=args.img_size)
    if model == 'WRN_40_2':
        return resnet.cifar_WRN_40_2(dropout=0, l2_reg=0.00025,
                                     preact_shortcuts=False, n_classes=n_classes,
                                     input_shape=args.img_size)
    if model == 'resnet50':
        return imagenet_resnet50()
    raise Exception('Unrecognized model')
# metric to keep track of
# Module-level running metrics shared across train/eval steps.
# NOTE(review): presumably reset externally between epochs -- confirm against callers.
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
train_loss = tf.keras.metrics.Mean()
test_loss = tf.keras.metrics.Mean()
def get_img_size(args):
    """Return the (height, width, channels) input shape for args.dataset.

    Raises:
        ValueError: if the dataset family is neither cifar* nor imagenet*.
        (Was a bare `raise Exception` with no message.)
    """
    if 'cifar' in args.dataset:
        return (32, 32, 3)
    elif 'imagenet' in args.dataset:
        return (*IMAGENET_SIZE, 3)
    else:
        raise ValueError('Unrecognized dataset: {}'.format(args.dataset))
# get the data
def get_dataset(args):
    """Load (train, val, test, search) splits for args.dataset.

    CIFAR: the original train split is cut at args.pretrain_size into a
    pretraining set and a policy-search set.  ImageNet: splits come from
    get_imagenet_split; test loading uses a torch DataLoader of raw uint8 images.
    """
    print('Loading train and retrain dataset.')
    if args.dataset in ['cifar10', 'cifar100']:
        if args.dataset == 'cifar10':
            assert args.n_classes == 10
            x_train_, y_train_, x_val, y_val, x_test, y_test = get_cifar10_data(val_size=10000)
            x_train, y_train = x_train_[:args.pretrain_size], y_train_[:args.pretrain_size]
            x_search, y_search = x_train_[args.pretrain_size:], y_train_[args.pretrain_size:]
        elif args.dataset == 'cifar100':
            assert args.n_classes == 100
            x_train_, y_train_, x_val, y_val, x_test, y_test = get_cifar100_data(val_size=10000)
            x_train, y_train = x_train_[:args.pretrain_size], y_train_[:args.pretrain_size]
            x_search, y_search = x_train_[args.pretrain_size:], y_train_[args.pretrain_size:]
        train_ds = DataGenerator(x_train, y_train, batch_size=args.batch_size, drop_last=True)
        search_ds = DataGenerator(x_search, y_search, batch_size=args.batch_size, drop_last=True)
        val_ds = DataGenerator(x_val, y_val, batch_size=args.val_batch_size, drop_last=True)
        test_ds = DataGenerator(x_test, y_test, batch_size=args.test_batch_size, drop_last=False, shuffle=False) # setting shuffle=False for parallel evaluation
    elif args.dataset == 'imagenet':
        assert args.n_classes == 1000
        def collate_fn_imagenet_list(l): # return a list of PIL-decoded uint8 arrays, not a stacked tensor
            images, labels = zip(*l)
            assert images[0].dtype == np.uint8
            return list(images), np.array(labels, dtype=np.int32)
        if args.dataset == 'imagenet':
            train_ds_total, val_ds, search_ds, train_ds, test_ds = get_imagenet_split(n_GPU=1, seed=300)
            assert len(train_ds) == 1 and isinstance(train_ds, list), 'Train_ds should be a length=1 list'
            train_ds = train_ds[0]
            test_ds = torch.utils.data.DataLoader(
                test_ds, batch_size=256, shuffle=False, num_workers=64,
                pin_memory=False,
                drop_last=False, sampler=None,
                collate_fn=collate_fn_imagenet_list,
            )
    else:
        raise Exception('Unrecognized dataset')
    return train_ds, val_ds, test_ds, search_ds
def get_augmentation(args):
    """Build (default, search, test) DataAugmentation pipelines for args.dataset.

    - default: the fixed baseline augmentation applied during pretraining.
    - search:  the learnable op list the policy searches over.
    - test:    evaluation-time preprocessing only.

    Raises:
        ValueError: for an unrecognized dataset.  (Fix: previously an unknown
        dataset fell through both branches and crashed with UnboundLocalError
        at the return statement.)
    """
    if 'cifar' in args.dataset:
        augmentation_default = DataAugmentation(num_classes=args.n_classes, dataset=args.dataset, image_shape=args.img_size,
                                                ops_list=(None, None),
                                                default_pre_aug=None,
                                                default_post_aug=[RandCrop,
                                                                  RandFlip,
                                                                  RandCutout])
        augmentation_search = DataAugmentation(num_classes=args.n_classes, dataset=args.dataset, image_shape=args.img_size,
                                               ops_list=aug_op_cifar_list(),
                                               default_pre_aug=None,
                                               default_post_aug=None)
        augmentation_test = DataAugmentation(num_classes=args.n_classes, dataset=args.dataset, image_shape=args.img_size,
                                             ops_list=(None, None),
                                             default_pre_aug=None,
                                             default_post_aug=None)
    elif 'imagenet' in args.dataset:
        augmentation_default = DataAugmentation(num_classes=args.n_classes, dataset=args.dataset,
                                                image_shape=args.img_size,
                                                ops_list=(None, None),
                                                default_pre_aug=None,
                                                default_post_aug=[RandResizeCrop_imagenet,
                                                                  RandFlip])
        augmentation_search = DataAugmentation(num_classes=args.n_classes, dataset=args.dataset, image_shape=args.img_size,
                                               ops_list=aug_op_imagenet_list(),
                                               default_pre_aug=None,
                                               default_post_aug=None)
        augmentation_test = DataAugmentation(num_classes=args.n_classes, dataset=args.dataset,
                                             image_shape=args.img_size,
                                             ops_list=(None, None),
                                             default_pre_aug=None,
                                             default_post_aug=[
                                                 centerCrop_imagenet,
                                             ])
    else:
        raise ValueError('Unrecognized dataset: {}'.format(args.dataset))
    return augmentation_default, augmentation_search, augmentation_test
def get_optim_net(args, nb_train_steps):
    """SGD (Nesterov, momentum 0.9) with a warmup+cosine LR schedule over all epochs."""
    total_steps = nb_train_steps * args.nb_epochs
    lr_schedule = GradualWarmup_Cosine_Scheduler(starting_lr=0., initial_lr=args.pretrain_lr,
                                                 ending_lr=1e-7,
                                                 warmup_steps=0,
                                                 total_steps=total_steps)
    return tf.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9, nesterov=True)
def get_policy(args, op_names, ops_mid_magnitude, available_policies):
    """Construct the differentiable augmentation-policy module."""
    return DA_Policy_logits(args.l_ops, args.l_mags, args.l_uniq,
                            op_names=op_names,
                            ops_mid_magnitude=ops_mid_magnitude,
                            N_repeat_random=args.N_repeat_random,
                            available_policies=available_policies)
def get_optim_policy(policy_lr):
    """Adam optimizer (default betas) for the policy parameters."""
    return tf.optimizers.Adam(learning_rate=policy_lr, beta_1=0.9, beta_2=0.999)
# get the loss
def get_loss_fun():
    """Return (train, test, val) sparse-CE losses; train is per-example (NONE reduction)."""
    per_example = tf.keras.losses.Reduction.NONE
    batch_mean = tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
    train_loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=per_example)
    test_loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=batch_mean)
    val_loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=batch_mean)
    return train_loss_fun, test_loss_fun, val_loss_fun
def get_lops_luniq(args, ops_mid_magnitude):
    """Return (l_ops, l_uniq): total op count and the number of unique
    (op, magnitude) policy entries, derived from each op's mid-magnitude spec.

    Spec values: 'random' or None -> 1 entry; -1 (or > l_mags-1) -> all l_mags
    entries; an in-range index -> l_mags-1 entries (the mid point is identity).
    """
    if 'cifar' in args.dataset:
        _, op_names = aug_op_cifar_list()
    elif 'imagenet' in args.dataset:
        _, op_names = aug_op_imagenet_list()
    else:
        raise Exception('Unknown dataset ={}'.format(args.dataset))
    # strip the "(min,max" suffix from names like "ShearX:(-0.3,0.3"
    names_modified = [op_name.split(':')[0] for op_name in op_names]
    l_ops = len(op_names)
    l_uniq = 0
    for k_name, name in enumerate(names_modified):
        mid_mag = ops_mid_magnitude[name]
        if mid_mag == 'random':
            l_uniq += 1 # The op is a random op, however we only sample one op
        elif mid_mag is not None and mid_mag >=0 and mid_mag <= args.l_mags-1:
            l_uniq += args.l_mags-1
        elif mid_mag is not None and mid_mag == -1: # magnitude==-1 means all l_mags are independnt policies; or mid_mag > args.l_mags-1)
            l_uniq += args.l_mags
        elif mid_mag is None:
            l_uniq += 1
        else:
            raise Exception('mid_mag = {} is invalid'.format(mid_mag))
    return l_ops, l_uniq
def get_all_policy(policy_train):
    """Enumerate every (op, magnitude) pair as two (l_ops*l_mags, 1) int32 arrays."""
    n_ops, n_mags = policy_train.l_ops, policy_train.l_mags
    op_grid, mag_grid = np.meshgrid(np.arange(n_ops), np.arange(n_mags), indexing='ij')
    ops = op_grid.reshape(n_ops * n_mags, 1).astype(np.int32)
    mags = mag_grid.reshape(n_ops * n_mags, 1).astype(np.int32)
    return ops, mags
class PrefetchGenerator(threading.Thread):
    """Daemon thread that pre-assembles class-matched (val, search) mini-batches.

    Each queue item is (images_val, labels_val, images_train, labels_train)
    where the k-th entries of the four lists share one randomly drawn label.
    """
    def __init__(self, search_ds, val_ds, n_classes, search_bs=8, val_bs=64):
        threading.Thread.__init__(self)
        self.queue = queue.Queue(1)  # hold at most one prefetched batch
        self.search_ds = search_ds
        self.val_ds = val_ds
        self.n_classes = n_classes
        self.search_bs = search_bs
        self.val_bs = val_bs
        self.daemon = True  # do not block interpreter exit
        self.start()
    @staticmethod
    def sample_label_and_batch(dataset, bs, n_classes, MAX_iterations=100):
        """Draw a random class label and `bs` examples of that class.

        Retries with a fresh label (up to MAX_iterations) when the sampled
        class cannot supply a batch.
        """
        for k in range(MAX_iterations):
            try:
                lab = random.randint(0, n_classes-1)
                imgs, labs = dataset.sample_labeled_data_batch(lab, bs)
            # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; keep those fatal.
            except Exception:
                print('Insufficient data in a single class, try {}/{}'.format(k, MAX_iterations))
                continue
            return lab, imgs, labs
        raise Exception('Maximum number of iteration {} reached'.format(MAX_iterations))
    def run(self):
        while True:
            images_val, labels_val, images_train, labels_train = [], [], [], []
            for _ in range(self.search_bs):
                lab, imgs_val, labs_val = PrefetchGenerator.sample_label_and_batch(self.val_ds, self.val_bs, self.n_classes)
                imgs_train, labs_train = self.search_ds.sample_labeled_data_batch(lab, 1)
                images_val.append(imgs_val)
                labels_val.append(labs_val)
                images_train.append(imgs_train)
                labels_train.append(labs_train)
            self.queue.put( (images_val, labels_val, images_train, labels_train) )
    def next(self):
        """Block until the next prefetched batch is ready and return it."""
        next_item = self.queue.get()
        return next_item
def save_policy(args, all_using_policies, augmentation_search):
    """Dump softmaxed policy probabilities of all replicas to a timestamped .npz file."""
    ops, mags = all_using_policies[0].unique_policy
    op_names = augmentation_search.op_names
    prob_list = [tf.nn.softmax(policy.logits).numpy() for policy in all_using_policies]
    policy_probs = np.stack(prob_list, axis=0)
    stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")
    np.savez('./policy_port/policy_DeepAA_{}.npz'.format(stamp),
             policy_probs=policy_probs, l_ops=args.l_ops, l_mags=args.l_mags,
             ops=ops, mags=mags, op_names=op_names)
| 13,983
| 42.7
| 161
|
py
|
DeepAA
|
DeepAA-master/imagenet_data_utils.py
|
import numpy as np
import tensorflow as tf
from torchvision.datasets.imagenet import *
from torch import randperm, default_generator
from torch._utils import _accumulate
from torch.utils.data.dataset import Subset
_DATA_TYPE = tf.float32
# ImageNet "JPEG" files that are actually CMYK-encoded.
# NOTE(review): list is unused in this module; presumably loaders elsewhere
# special-case these files -- confirm against callers.
CMYK_IMAGES = [
    'n01739381_1309.JPEG',
    'n02077923_14822.JPEG',
    'n02447366_23489.JPEG',
    'n02492035_15739.JPEG',
    'n02747177_10752.JPEG',
    'n03018349_4028.JPEG',
    'n03062245_4620.JPEG',
    'n03347037_9675.JPEG',
    'n03467068_12171.JPEG',
    'n03529860_11437.JPEG',
    'n03544143_17228.JPEG',
    'n03633091_5218.JPEG',
    'n03710637_5125.JPEG',
    'n03961711_5286.JPEG',
    'n04033995_2932.JPEG',
    'n04258138_17003.JPEG',
    'n04264628_27969.JPEG',
    'n04336792_7448.JPEG',
    'n04371774_5854.JPEG',
    'n04596742_4225.JPEG',
    'n07583066_647.JPEG',
    'n13037406_4650.JPEG',
]
# ImageNet "JPEG" that is actually a PNG file.
PNG_IMAGES = ['n02105855_2933.JPEG']
class ImageNet(ImageFolder):
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
    Copied from torchvision, besides warning below.
    Args:
        root (string): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
    Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    WARN::
        This is the same ImageNet class as in torchvision.datasets.imagenet, but it has the `ignore_archive` argument.
        This allows us to only copy the unzipped files before training.
    """
    def __init__(self, root, split='train', download=None, ignore_archive=False, **kwargs):
        if download is True:
            msg = ("The dataset is no longer publicly accessible. You need to "
                   "download the archives externally and place them in the root "
                   "directory.")
            raise RuntimeError(msg)
        elif download is False:
            msg = ("The use of the download flag is deprecated, since the dataset "
                   "is no longer publicly accessible.")
            warnings.warn(msg, RuntimeWarning)
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))
        # ignore_archive=True skips the (slow) archive-parsing step entirely
        if not ignore_archive:
            self.parse_archives()
        wnid_to_classes = load_meta_file(self.root)[0]
        super(ImageNet, self).__init__(self.split_folder, **kwargs)
        self.root = root
        # remap folder-derived class names (wnids) to human-readable class tuples
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        self.class_to_idx = {cls: idx
                             for idx, clss in enumerate(self.classes)
                             for cls in clss}
    def parse_archives(self):
        # Unpack devkit/train/val archives only when not already extracted.
        if not check_integrity(os.path.join(self.root, META_FILE)):
            parse_devkit_archive(self.root)
        if not os.path.isdir(self.split_folder):
            if self.split == 'train':
                parse_train_archive(self.root)
            elif self.split == 'val':
                parse_val_archive(self.root)
    @property
    def split_folder(self):
        # e.g. <root>/train or <root>/val
        return os.path.join(self.root, self.split)
    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
class ImageNet_DeepAA(ImageNet):
    """ImageNet variant that caches labels and supports class-conditional sampling."""
    def __init__(self, root, split='train', download=None, **kwargs):
        super(ImageNet_DeepAA, self).__init__(root, split=split, download=download, ignore_archive=True, **kwargs)
        # cache the label of every sample for fast class-conditional lookup
        _, self.labels_ = zip(*self.samples)
    def on_epoch_end(self):
        # interface parity with the keras-style generators; nothing to do here
        print('Dummy one_epoch_end for ImageNet dataset using torchvision')
        pass
    def sample_labeled_data_batch(self, label, val_bs): # generate val and train batch at the same time
        """Return `val_bs` randomly chosen samples (and int32 labels) of class `label`."""
        matched_indices = [id for id, lab in enumerate(self.labels_) if lab==label]
        matched_indices = np.array(matched_indices)
        assert len(matched_indices) > val_bs, 'Make sure the have enough data'
        np.random.shuffle(matched_indices)
        val_indices = matched_indices[:val_bs]
        val_samples, val_labels = zip(*[self[id] for id in val_indices])
        val_samples = list(val_samples)
        val_labels = np.array(val_labels, dtype=np.int32)
        return val_samples, val_labels
class Subset_ImageNet(Subset):
    """torch Subset that caches per-subset labels and supports class-conditional sampling."""
    def __init__(self, dataset, indices):
        super(Subset_ImageNet, self).__init__(dataset, indices)
        # cache the label of every subset member for fast class lookups
        self.subset_labels_ = [self.dataset.labels_[k] for k in indices]
    def on_epoch_end(self):
        # interface parity with the keras-style generators; nothing to reshuffle
        pass
    def sample_labeled_data_batch(self, label, val_bs):
        """Return `val_bs` random samples (and int32 labels) of class `label`."""
        candidates = [self.indices[pos] for pos, lab in enumerate(self.subset_labels_) if lab == label]
        candidates = np.array(candidates)
        assert len(candidates) > val_bs, 'Make sure the have enough data'
        np.random.shuffle(candidates)
        chosen = candidates[:val_bs]
        samples, labels = zip(*[self.dataset[idx] for idx in chosen])  # applies transforms
        return list(samples), np.array(labels, dtype=np.int32)
def random_split_ImageNet(dataset, lengths, generator=default_generator):
    """Randomly partition `dataset` into Subset_ImageNet pieces of the given lengths."""
    if sum(lengths) != len(dataset):
        raise ValueError('Sum of input lengths does not equal the length of the input dataset')
    permutation = randperm(sum(lengths), generator=generator).tolist()
    pieces = []
    for offset, length in zip(_accumulate(lengths), lengths):
        pieces.append(Subset_ImageNet(dataset, permutation[offset - length:offset]))
    return pieces
def get_imagenet_split(val_size=400000, train_sep_size=100000, dataroot='./data', n_GPU=None, seed=300):
    """Split ImageNet train into (total, val, separate-train, per-GPU pretrain shards, test).

    NOTE(review): the default n_GPU=None would raise a TypeError in the division
    below -- callers appear to always pass n_GPU explicitly; confirm.
    """
    # keep raw uint8 arrays; augmentation happens downstream
    transform = lambda img: np.array(img, dtype=np.uint8)
    total_trainset = ImageNet_DeepAA(root=os.path.join(dataroot, 'imagenet-pytorch'), transform=transform)
    testset = ImageNet_DeepAA(root=os.path.join(dataroot, 'imagenet-pytorch'), split='val', transform=transform)
    N_per_shard = (len(total_trainset) - val_size - train_sep_size)//n_GPU
    remaining_data = len(total_trainset) - val_size - train_sep_size - n_GPU * N_per_shard
    # any leftover samples (from integer division) go into one extra split
    if remaining_data > 0:
        splits = [val_size, train_sep_size, *[N_per_shard]*n_GPU, remaining_data]
    else:
        splits = [val_size, train_sep_size, *[N_per_shard]*n_GPU]
    all_ds = random_split_ImageNet(total_trainset,
                                   lengths=splits,
                                   generator=torch.Generator().manual_seed(seed))
    val_ds = all_ds[0]
    train_ds_sep = all_ds[1]
    pretrain_ds_splits = all_ds[2:2+n_GPU]
    return total_trainset, val_ds, train_ds_sep, pretrain_ds_splits, testset
| 7,325
| 40.625
| 129
|
py
|
DeepAA
|
DeepAA-master/augmentation.py
|
# code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
# https://github.com/ildoonet/pytorch-randaugment/blob/master/RandAugment/augmentations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
from PIL import Image
import math
IMAGENET_SIZE = (224, 224) # (width, height) may set to (244, 224)
# PCA color statistics for ImageNet.
# NOTE(review): unused in this file -- presumably for AlexNet-style PCA
# lighting jitter elsewhere; confirm before removing.
_IMAGENET_PCA = {
    'eigval': [0.2175, 0.0188, 0.0045],
    'eigvec': [
        [-0.5675, 0.7192, 0.4009],
        [-0.5808, -0.0045, -0.8140],
        [-0.5836, -0.6948, 0.4203],
    ]
}
# Per-channel mean/std for CIFAR normalization.
_CIFAR_MEAN, _CIFAR_STD = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
def ShearX(img, v):  # [-0.3, 0.3]
    """Shear the image horizontally by factor v via an affine transform."""
    assert -0.3 <= v <= 0.3
    return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v):  # [-0.3, 0.3]
    """Shear the image vertically by factor v via an affine transform."""
    assert -0.3 <= v <= 0.3
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v):  # fraction of width in [-0.45, 0.45]
    """Translate the image horizontally by v * width pixels."""
    assert -0.45 <= v <= 0.45
    v = v * img.size[0]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v):  # fraction of height in [-0.45, 0.45]
    """Translate the image vertically by v * height pixels."""
    assert -0.45 <= v <= 0.45
    v = v * img.size[1]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v):  # degrees in [-30, 30]
    """Rotate the image by v degrees counter-clockwise."""
    assert -30 <= v <= 30
    return img.rotate(v)
def AutoContrast(img, _):
    """Maximize image contrast; the magnitude argument is ignored."""
    return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
    """Invert (negate) pixel values; the magnitude argument is ignored."""
    return PIL.ImageOps.invert(img)
def Equalize(img, _):
    """Histogram-equalize the image; the magnitude argument is ignored."""
    return PIL.ImageOps.equalize(img)
def Flip(img, _):  # not from the paper
    """Mirror the image horizontally; the magnitude argument is ignored."""
    return PIL.ImageOps.mirror(img)
def Solarize(img, v):  # threshold in [0, 256]
    """Invert all pixels above threshold v."""
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
    """Add `addition` to every pixel (clipped to [0, 255]), then solarize at `threshold`.

    Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24 -- use the
    builtin `int` dtype alias instead (identical behavior).
    """
    img_np = np.array(img).astype(int)
    img_np = img_np + addition
    img_np = np.clip(img_np, 0, 255)
    img_np = img_np.astype(np.uint8)
    img = Image.fromarray(img_np)
    return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v):  # bits in [4, 8]
    """Reduce each color channel to int(v) bits."""
    assert 4 <= v <= 8 # FastAA
    v = int(v)
    return PIL.ImageOps.posterize(img, v)
def Contrast(img, v):  # [0.1, 1.9]; 1.0 is identity
    """Scale image contrast by factor v."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v):  # [0.1, 1.9]; 1.0 is identity
    """Scale color saturation by factor v."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v):  # [0.1, 1.9]; 1.0 is identity
    """Scale image brightness by factor v."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v):  # [0.1, 1.9]; 1.0 is identity
    """Scale image sharpness by factor v."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Sharpness(img).enhance(v)
def RandCrop(img, _):
    """Pad by 4 pixels on each side (mean color), then take a random crop."""
    v = 4
    return mean_pad_randcrop(img, v)
def RandCutout(img, _):
    """Paint a 16x16 mean-color square at a uniformly random center (clipped to image)."""
    v = 16
    w, h = img.size
    x = random.uniform(0, w)
    y = random.uniform(0, h)
    x0 = int(min(w, max(0, x - v // 2))) # clip to the range (0, w)
    x1 = int(min(w, max(0, x + v // 2)))
    y0 = int(min(h, max(0, y - v // 2)))
    y1 = int(min(h, max(0, y + v // 2)))
    xy = (x0, y0, x1, y1)
    color = (125, 123, 114)  # approximate dataset mean color
    # color = (0, 0, 0)
    img = img.copy()  # do not mutate the caller's image
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
def RandCutout60(img, _):
    """Paint a 60x60 mean-color square whose center falls in the central 256x256 region."""
    v = 60
    w, h = img.size
    # restrict the cutout center to the central 256x256 window (clipped to image)
    x_left = max(0, w // 2 - 256 // 2)
    x_right = min(w, w // 2 + 256 // 2)
    y_bottom = max(0, h // 2 - 256 // 2)
    y_top = min(h, h // 2 + 256 // 2)
    x = random.uniform(x_left, x_right)
    y = random.uniform(y_bottom, y_top)
    x0 = int(min(w, max(0, x - v // 2)))
    x1 = int(min(w, max(0, x + v // 2)))
    y0 = int(min(h, max(0, y - v // 2)))
    y1 = int(min(h, max(0, y + v // 2)))
    xy = (x0, y0, x1, y1)
    color = (125, 123, 114)  # approximate dataset mean color
    # color = (0, 0, 0)
    img = img.copy()  # do not mutate the caller's image
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
def RandFlip(img, _):
    """Mirror the image horizontally with probability 0.5."""
    if random.random() > 0.5:
        img = Flip(img, None)
    return img
def mean_pad_randcrop(img, v):
    """Pad each side by v pixels of mean color, then random-crop back to the original size."""
    # v: Pad with mean value=[125, 123, 114] by v pixels on each side and then take random crop
    assert v <= 10, 'The maximum shift should be less then 10'
    padded_size = (img.size[0] + 2*v, img.size[1] + 2*v)
    new_img = PIL.Image.new('RGB', padded_size, color=(125, 123, 114))
    new_img.paste(img, (v, v))
    # crop origin ranges over [0, 2v] in both axes
    top = random.randint(0, v*2)
    left = random.randint(0, v*2)
    new_img = new_img.crop((left, top, left + img.size[0], top + img.size[1]))
    return new_img
def Identity(img, v):
    """Return the image unchanged (no-op augmentation)."""
    return img
def RandResizeCrop_imagenet(img, _):
    """Random resized crop (torchvision port) to IMAGENET_SIZE with bicubic resampling.

    Samples a crop of random area (8%-100%) and aspect ratio (3/4 to 4/3);
    falls back to a central crop after 10 failed attempts.
    """
    # ported from torchvision
    # for ImageNet use only
    scale = (0.08, 1.0)
    ratio = (3. / 4., 4. / 3.)
    size = IMAGENET_SIZE # (224, 224)
    def get_params(img, scale, ratio):
        # Returns (left, top, w, h) of the crop box.
        width, height = img.size
        area = float(width * height)
        log_ratio = [math.log(r) for r in ratio]
        for _ in range(10):
            target_area = area * random.uniform(scale[0], scale[1])
            # sample aspect ratio log-uniformly so inverse ratios are equally likely
            aspect_ratio = math.exp(random.uniform(log_ratio[0], log_ratio[1]))
            w = round(math.sqrt(target_area * aspect_ratio))
            h = round(math.sqrt(target_area / aspect_ratio))
            if 0 < w <= width and 0 < h <= height:
                top = random.randint(0, height - h)
                left = random.randint(0, width - w)
                return left, top, w, h
        # fallback to central crop
        in_ratio = float(width) / float(height)
        if in_ratio < min(ratio):
            w = width
            h = round(w / min(ratio))
        elif in_ratio > max(ratio):
            h = height
            w = round(h * max(ratio))
        else:
            w = width
            h = height
        top = (height - h) // 2
        left = (width - w) // 2
        return left, top, w, h
    left, top, w_box, h_box = get_params(img, scale, ratio)
    box = (left, top, left + w_box, top + h_box)
    img = img.resize(size=size, resample=PIL.Image.CUBIC, box=box)
    return img
def Resize_imagenet(img, size):
    """Resize a PIL image with bicubic resampling.

    If `size` is an int, scale the shorter side to `size` keeping aspect ratio;
    if it is a (width, height) tuple/list, resize to exactly that.

    Raises:
        TypeError: if `size` is neither an int nor a 2-sequence.
        (Was a bare `raise Exception` with no message.)
    """
    w, h = img.size
    if isinstance(size, int):
        short, long = (w, h) if w <= h else (h, w)
        if short == size:
            return img  # already at target size; skip a useless resample
        new_short, new_long = size, int(size * long / short)
        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
        return img.resize((new_w, new_h), PIL.Image.BICUBIC)
    elif isinstance(size, (tuple, list)):
        assert len(size) == 2, 'Check the size {}'.format(size)
        return img.resize(size, PIL.Image.BICUBIC)
    else:
        raise TypeError('size must be an int or a (width, height) pair, got {!r}'.format(size))
def centerCrop_imagenet(img, _):
    """Center-crop to IMAGENET_SIZE, zero-padding first if the image is smaller."""
    # for ImageNet only
    # https://github.com/pytorch/vision/blob/master/torchvision/transforms/functional.py
    crop_width, crop_height = IMAGENET_SIZE # (224,224)
    image_width, image_height = img.size
    if crop_width > image_width or crop_height > image_height:
        # pad symmetrically (extra pixel goes right/bottom) up to the crop size
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)
        image_width, image_height = img.size
        if crop_width == image_width and crop_height == image_height:
            return img
    crop_top = int(round((image_height - crop_height) / 2.))
    crop_left = int(round((image_width - crop_width) / 2.))
    return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))
# def centerCrop_imagenet_default(img):
# return centerCrop_imagenet(img, None)
def _parse_fill(fill, img, name="fillcolor"):
# Process fill color for affine transforms
num_bands = len(img.getbands())
if fill is None:
fill = 0
if isinstance(fill, (int, float)) and num_bands > 1:
fill = tuple([fill] * num_bands)
if isinstance(fill, (list, tuple)):
if len(fill) != num_bands:
msg = ("The number of elements in 'fill' does not match the number of "
"bands of the image ({} != {})")
raise ValueError(msg.format(len(fill), num_bands))
fill = tuple(fill)
return {name: fill}
def pad(img, padding_ltrb, fill=0, padding_mode='constant'):
    """Pad (or, for negative entries, crop) a PIL image.

    ``padding_ltrb`` is (left, top, right, bottom). Mode 'constant' delegates
    to PIL.ImageOps.expand with ``fill``; any other mode routes through
    ``np.pad`` with ``padding_mode`` as the numpy mode string. Palette ('P')
    images get their palette re-attached after padding.
    """
    if isinstance(padding_ltrb, list):
        padding_ltrb = tuple(padding_ltrb)
    if padding_mode == 'constant':
        opts = _parse_fill(fill, img, name='fill')
        if img.mode == 'P':
            # expand() drops the palette; save it and restore afterwards.
            palette = img.getpalette()
            image = PIL.ImageOps.expand(img, border=padding_ltrb, **opts)
            image.putpalette(palette)
            return image
        return PIL.ImageOps.expand(img, border=padding_ltrb, **opts)
    elif len(padding_ltrb) == 4:
        image_width, image_height = img.size
        # Negative padding entries mean "crop that many pixels" instead.
        cropping = -np.minimum(padding_ltrb, 0)
        if cropping.any():
            crop_left, crop_top, crop_right, crop_bottom = cropping
            img = img.crop((crop_left, crop_top, image_width - crop_right, image_height - crop_bottom))
        pad_left, pad_top, pad_right, pad_bottom = np.maximum(padding_ltrb, 0)
        if img.mode == 'P':
            # Palette image: pad the index array, then restore the palette.
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
            img = Image.fromarray(img)
            img.putpalette(palette)
            return img
        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
        return Image.fromarray(img)
    else:
        raise Exception
def get_mid_magnitude(l_mags):
    """Return, per op name, the magnitude index that leaves the image unchanged.

    With ``l_mags`` discrete magnitude levels: symmetric ops (shear, rotate,
    colour enhancers, ...) are neutral at the centre index; Solarize and
    Posterize are neutral at the top index; parameter-free ops map to None;
    stochastic ops map to the sentinel string 'random'.
    """
    centre = (l_mags - 1) // 2
    top = l_mags - 1
    return {
        'Identity': None,
        'ShearX': centre,
        'ShearY': centre,
        'TranslateX': centre,
        'TranslateY': centre,
        'Rotate': centre,
        'AutoContrast': None,
        'Invert': None,
        'Equalize': None,
        'Solarize': top,
        'Posterize': top,
        'Contrast': centre,
        'Color': centre,
        'Brightness': centre,
        'Sharpness': centre,
        'RandFlip': 'random',
        'RandCutout': 'random',
        'RandCutout60': 'random',
        'RandCrop': 'random',
        'RandResizeCrop_imagenet': 'random',
    }
| 11,099
| 31.840237
| 103
|
py
|
DeepAA
|
DeepAA-master/data_generator.py
|
import os
import copy
import logging
import numpy as np
import math
from PIL import Image
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
tf.get_logger().setLevel(logging.ERROR)
from tensorflow.keras.utils import Sequence
from augmentation import IMAGENET_SIZE, centerCrop_imagenet
CIFAR_MEANS = np.array([0.49139968, 0.48215841, 0.44653091], dtype=np.float32)
CIFAR_STDS = np.array([0.2023, 0.1994, 0.2010], dtype=np.float32)
IMAGENET_MEANS = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STDS = np.array([0.229, 0.224, 0.225], dtype=np.float32)
def split_train_validation(x, y, val_size):
    """Randomly split paired arrays into train and validation parts.

    Returns (x_train, y_train, x_val, y_val) where the validation part holds
    ``val_size`` samples. Bug fix: the original shuffled ``indices`` but never
    applied the permutation to ``x``/``y``, so the "random" split was in fact
    the deterministic tail of the dataset; the permutation is now applied.
    """
    indices = np.arange(len(x))
    np.random.shuffle(indices)
    x, y = x[indices], y[indices]  # actually apply the random permutation
    x_train, x_val = x[:-val_size], x[-val_size:]
    y_train, y_val = y[:-val_size], y[-val_size:]
    return x_train, y_train, x_val, y_val
def get_cifar100_data(num_classes=100, val_size=10000):
    """Load CIFAR-100 via tf.keras (downloads on first use).

    Returns (x_train, y_train, x_val, y_val, x_test, y_test); the validation
    arrays are None when ``val_size`` is 0. ``num_classes`` is accepted for
    API symmetry but not used here.
    """
    (x_train_val, y_train_val), (x_test, y_test) = tf.keras.datasets.cifar100.load_data()
    # Labels arrive as (N, 1); flatten to (N,).
    y_train_val = y_train_val.squeeze()
    y_test = y_test.squeeze()
    if val_size <= 0:
        x_train, y_train = x_train_val, y_train_val
        x_val, y_val = None, None
    else:
        x_train, y_train, x_val, y_val = split_train_validation(x_train_val, y_train_val, val_size=val_size)
    return x_train, y_train, x_val, y_val, x_test, y_test
def get_cifar10_data(num_classes=10, val_size=10000):
    """Load CIFAR-10 via tf.keras (downloads on first use).

    Returns (x_train, y_train, x_val, y_val, x_test, y_test); the validation
    arrays are None when ``val_size`` is 0. ``num_classes`` is accepted for
    API symmetry but not used here.
    """
    (x_train_val, y_train_val), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    # Labels arrive as (N, 1); flatten to (N,).
    y_train_val = y_train_val.squeeze()
    y_test = y_test.squeeze()
    if val_size <= 0:
        x_train, y_train = x_train_val, y_train_val
        x_val, y_val = None, None
    else:
        x_train, y_train, x_val, y_val = split_train_validation(x_train_val, y_train_val, val_size=val_size)
    return x_train, y_train, x_val, y_val, x_test, y_test
class DataGenerator(Sequence):
    """Keras Sequence serving (inputs, labels) batches from in-memory data.

    ``data`` may be a numpy array or a list (e.g. variable-size images); when
    both ``data`` and ``labels`` are lists, batches are returned as
    (list_of_items, int32 label array). ``self._data`` always references the
    raw data; ``self.data`` is the (possibly augmented) view actually served,
    restored by :meth:`reset_augment`.
    """
    def __init__(self,
                 data,
                 labels,
                 img_dim=None,
                 batch_size=32,
                 num_classes=10,
                 shuffle=True,
                 drop_last=True,
                 ):
        self._data = data
        # Before any augmentation is installed, serve the raw data directly.
        self.data = self._data
        self.labels = labels
        self.img_dim = img_dim
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.shuffle = shuffle
        self.drop_last = drop_last
        self.on_epoch_end()
    def reset_augment(self):
        """Drop any augmented view and serve the raw data again."""
        self.data = self._data
    def on_epoch_end(self):
        """Re-draw the sample order (invoked by Keras after each epoch)."""
        self.indices = np.arange(len(self._data))
        if self.shuffle:
            np.random.shuffle(self.indices)
    def sample_labeled_data_batch(self, label, bs):
        """Sample up to ``bs`` items whose label equals ``label``.

        Returns (items, labels_array). A fresh shuffle is drawn per call, and
        a contiguous window of the matches is taken at a random offset.
        Bug fix: the window start was drawn from ``randint(0, n_matched - bs)``,
        which raises ValueError when exactly ``bs`` items match (low == high);
        the upper bound now includes the final window.
        """
        # shuffle indices every time so repeated calls see different samples
        indices = np.arange(len(self._data))
        np.random.shuffle(indices)
        if isinstance(self.labels, list):
            labels = [self.labels[k] for k in indices]
        else:
            labels = self.labels[indices]
        matched_labels = np.array(labels) == int(label)
        matched_indices = [idx for idx, is_matched in enumerate(matched_labels) if is_matched]
        if len(matched_indices) >= bs:
            start_idx = np.random.randint(0, len(matched_indices) - bs + 1)
            batch_indices = matched_indices[start_idx:start_idx + bs]
        else:
            print('Not enough matched data, required {}, but got {} instead'.format(bs, len(matched_indices)))
            batch_indices = matched_indices
        data_indices = indices[batch_indices]
        return [self.data[k] for k in data_indices], np.array([self.labels[k] for k in data_indices], dtype=self.labels[0].dtype)
    def __len__(self):
        """Number of batches per epoch."""
        if self.drop_last:
            return int(np.floor(len(self.data) / self.batch_size))  # drop the final partial batch
        else:
            return int(np.ceil(len(self.data) / self.batch_size))   # keep the final partial batch
    def __getitem__(self, idx):
        """Return batch ``idx`` as (inputs, labels)."""
        curr_batch = self.indices[idx*self.batch_size:(idx+1)*self.batch_size]
        if isinstance(self.data, list) and isinstance(self.labels, list):
            return [self.data[k] for k in curr_batch], np.array([self.labels[k] for k in curr_batch], np.int32)
        else:
            return self.data[curr_batch], self.labels[curr_batch]
class DataAugmentation(object):
    """Applies a sampled sequence of augmentation ops to a batch of images.

    ``ops_list`` is an (ops, op_names) pair where each op is a
    (callable, minval, maxval) triple; magnitudes sampled in [0, 1] are
    rescaled into [minval, maxval] before the op is applied. ``__call__``
    farms the per-image work out to a multiprocessing pool.
    """
    def __init__(self, num_classes, dataset, image_shape, ops_list=None, default_pre_aug=None, default_post_aug=None):
        self.ops, self.op_names = ops_list
        self.default_pre_aug = default_pre_aug
        self.default_post_aug = default_post_aug
        self.num_classes = num_classes
        self.dataset = dataset
        self.image_shape = image_shape
        # Only CIFAR-like (32x32x3) and ImageNet-like (IMAGENET_SIZE + 3ch) shapes are supported.
        if 'imagenet' in self.dataset:
            assert self.image_shape == (*IMAGENET_SIZE, 3)
        elif 'cifar' in self.dataset:
            assert self.image_shape == (32, 32, 3)
        else:
            raise Exception('Unrecognized dataset')
    def sequantially_augment(self, args):
        """Augment one uint8 image; returns (idx, augmented_uint8_image).

        ``args`` is packed as (idx, image, op_indices, magnitudes, aug_finish)
        so this method can be fed directly to Pool.imap_unordered. The chain
        is: default_pre_aug, then each sampled (op, magnitude), then
        default_post_aug (only when ``self.use_post_aug`` is set by __call__).
        """
        idx, img_, op_idxs, mags, aug_finish = args
        assert img_.dtype == np.uint8, 'Input images should be unporocessed, should stay in np.uint8'
        img = copy.deepcopy(img_)
        pil_img = Image.fromarray(img) # Convert to PIL.Image
        if self.default_pre_aug is not None:
            for op in self.default_pre_aug:
                pil_img = op(pil_img)
        if self.ops is not None:
            for op_idx, mag in zip(op_idxs, mags):
                op, minval, maxval = self.ops[op_idx]
                assert mag > -1e-5 and mag < 1. + 1e-5, 'magnitudes should be in the range of (0., 1.)'
                # Rescale the normalized magnitude into this op's own range.
                mag = mag * (maxval - minval) + minval
                pil_img = op(pil_img, mag)
        if self.default_post_aug is not None and self.use_post_aug:
            for op in self.default_post_aug:
                pil_img = op(pil_img, None)
        if 'cifar' in self.dataset:
            img = np.asarray(pil_img, dtype=np.uint8)
            return idx, img
        elif 'imagenet' in self.dataset:
            if aug_finish:
                # Bring variable-size ImageNet images to the common crop size.
                pil_img = self.crop_IMAGENET(pil_img)
            img = np.asarray(pil_img, dtype=np.uint8)
            return idx, img
        else:
            raise Exception
    def postprocessing_standardization(self, pil_img):
        """Scale uint8 pixels to [0, 1] and normalize with per-dataset mean/std."""
        x = np.asarray(pil_img, dtype=np.float32) / 255.
        if 'cifar' in self.dataset:
            x = (x - CIFAR_MEANS) / CIFAR_STDS
        elif 'imagenet' in self.dataset:
            x = (x - IMAGENET_MEANS) / IMAGENET_STDS
        else:
            raise Exception('Unrecoginized dataset')
        return x
    def crop_IMAGENET(self, img):
        # cropping imagenet dataset to the same size
        # (numpy inputs must already be at IMAGENET_SIZE; PIL inputs are
        # centre-cropped/padded only when their size differs)
        if isinstance(img, np.ndarray):
            assert img.shape == (IMAGENET_SIZE[1], IMAGENET_SIZE[0], 3) and img.dtype==np.uint8, 'numpy array should be {}, but got {}. crop_IMAGENET does not apply to numpy array, but got {}'.format(IMAGENET_SIZE, img.size, img.dtype)
            return img
        w, h = img.size
        if w == IMAGENET_SIZE[0] and h == IMAGENET_SIZE[1]:
            return img
        return centerCrop_imagenet(img, None)
    def check_data_type(self, images, labels):
        """Sanity-check raw batch dtypes: uint8 images, per-dataset label type."""
        assert images[0].dtype == np.uint8
        if 'imagenet' in self.dataset:
            assert type(labels[0]) == np.int32
        elif 'cifar' in self.dataset:
            assert type(labels[0]) == np.uint8
        else:
            raise Exception('Unrecognized dataset')
    def __call__(self, images, labels, samples_op, samples_mag, use_post_aug, pool=None, chunksize=None, aug_finish=True):
        """Augment a batch in parallel; returns (augmented_images, labels).

        When ``aug_finish`` is True the result is a normalized float32 array;
        otherwise a list of raw uint8 images (ImageNet images not yet
        centre-cropped). ``pool`` must be a multiprocessing pool.
        """
        self.check_data_type(images, labels)
        self.use_post_aug = use_post_aug
        self.batch_len = len(labels)
        if aug_finish:
            aug_imgs = np.empty([self.batch_len, *self.image_shape], dtype=np.float32)
        else:
            aug_imgs = [None]*self.batch_len
        # imap_unordered returns results out of order; each carries its index
        # so the batch can be reassembled in place.
        aug_results = pool.imap_unordered(self.sequantially_augment,
                                          zip(range(self.batch_len), images, samples_op, samples_mag, [aug_finish]*self.batch_len),
                                          chunksize=math.ceil(float(self.batch_len) / float(pool._processes)) if chunksize is None else chunksize)
        for idx, img in aug_results:
            aug_imgs[idx] = img
        if aug_finish:
            aug_imgs = self.postprocessing_standardization(aug_imgs)
        return aug_imgs, labels
| 8,476
| 41.174129
| 235
|
py
|
DeepAA
|
DeepAA-master/resnet.py
|
import os
import tensorflow as tf
# ref: https://github.com/gahaalt/resnets-in-tensorflow2/blob/master/Models/Resnets.py
_bn_momentum = 0.9
def regularized_padded_conv(*args, **kwargs):
    """'same'-padded Conv2D with the module-level L2 regularizer on both
    kernel and bias, He-normal kernel init, and a bias term."""
    return tf.keras.layers.Conv2D(*args, **kwargs,
                                  padding='same',
                                  kernel_regularizer=_regularizer,
                                  bias_regularizer=_regularizer,
                                  kernel_initializer='he_normal',
                                  use_bias=True)
def bn_relu(x):
    """SyncBatchNorm (momentum=_bn_momentum) followed by ReLU."""
    normed = tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum)(x)
    return tf.keras.layers.ReLU()(normed)
def shortcut(x, filters, stride, mode):
    """Residual shortcut branch.

    mode 'A': parameter-free — subsample spatially via a 1x1 max-pool and
    zero-pad the channel dimension. mode 'B': 1x1 projection conv.
    mode 'B_original': projection conv followed by batch-norm.
    NOTE(review): when the channel counts already match, the input is
    returned untouched even if stride > 1 (the original "maybe and stride==1"
    comment flags the same concern) — presumably callers never combine
    matching channels with stride > 1; confirm before reuse.
    """
    if x.shape[-1] == filters:
        return x
    if mode == 'B':
        return regularized_padded_conv(filters, 1, strides=stride)(x)
    if mode == 'B_original':
        projected = regularized_padded_conv(filters, 1, strides=stride)(x)
        return tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum)(projected)
    if mode == 'A':
        pooled = tf.keras.layers.MaxPool2D(1, stride)(x) if stride > 1 else x
        return tf.pad(pooled,
                      paddings=[(0, 0), (0, 0), (0, 0), (0, filters - x.shape[-1])])
    raise KeyError("Parameter shortcut_type not recognized!")
def original_block(x, filters, stride=1, **kwargs):
    """Post-activation (original ResNet) basic block:
    conv -> BN-ReLU -> conv -> BN, add shortcut, final ReLU."""
    out = regularized_padded_conv(filters, 3, strides=stride)(x)
    out = regularized_padded_conv(filters, 3)(bn_relu(out))
    out = tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum)(out)
    # Original-style blocks use the BN-terminated variant of the 'B' shortcut.
    mode = 'B_original' if _shortcut_type == 'B' else _shortcut_type
    skip = shortcut(x, filters, stride, mode=mode)
    return tf.keras.layers.ReLU()(skip + out)
def preactivation_block(x, filters, stride=1, preact_block=False):
    """Pre-activation basic block (BN-ReLU-conv twice) with optional dropout
    between the convs; ``preact_block`` is accepted for API parity but unused."""
    pre = bn_relu(x)
    out = regularized_padded_conv(filters, 3)(pre)
    if _dropout:
        out = tf.keras.layers.Dropout(_dropout)(out)
    out = regularized_padded_conv(filters, 3, strides=stride)(bn_relu(out))
    return shortcut(x, filters, stride, mode=_shortcut_type) + out
def bootleneck_block(x, filters, stride=1, preact_block=False):
    """Pre-activation bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand).

    When ``preact_block`` is set, the shared BN-ReLU output also feeds the
    shortcut (pre-activated projection, as in ResNet-v2 first blocks).
    """
    pre = bn_relu(x)
    if preact_block:
        x = pre
    squeezed = filters // _bootleneck_width
    out = regularized_padded_conv(squeezed, 1)(pre)
    out = regularized_padded_conv(squeezed, 3, strides=stride)(bn_relu(out))
    out = regularized_padded_conv(filters, 1)(bn_relu(out))
    return shortcut(x, filters, stride, mode=_shortcut_type) + out
def group_of_blocks(x, block_type, num_blocks, filters, stride, block_idx=0):
    """Stack ``num_blocks`` blocks; only the first may downsample (stride) and
    only the first uses a pre-activated shortcut when so configured."""
    global _preact_shortcuts
    preact_first = True if _preact_shortcuts or block_idx == 0 else False
    x = block_type(x, filters, stride, preact_block=preact_first)
    for _ in range(num_blocks - 1):
        x = block_type(x, filters)
    return x
def Resnet(input_shape, n_classes, l2_reg=1e-4, group_sizes=(2, 2, 2), features=(16, 32, 64), strides=(1, 2, 2),
           shortcut_type='B', block_type='preactivated', first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
           dropout=0, cardinality=1, bootleneck_width=4, preact_shortcuts=True,
           final_dense_kernel_initializer=None, final_dense_bias_initializer=None):
    """Build a (pre-act / original / bottleneck) ResNet as a tf.keras Model.

    One group per entry of ``group_sizes``/``features``/``strides``, followed
    by global average pooling and an L2-regularized Dense classifier.
    NOTE(review): the mutable dict default for ``first_conv`` is shared across
    calls; it is only read here (never mutated), so this is safe as written.
    """
    # The block functions read their configuration from module-level globals,
    # so publish this call's settings before building any block.
    global _regularizer, _shortcut_type, _preact_projection, _dropout, _cardinality, _bootleneck_width, _preact_shortcuts
    _bootleneck_width = bootleneck_width  # used in ResNeXts and bootleneck blocks
    _regularizer = tf.keras.regularizers.l2(l2_reg)
    _shortcut_type = shortcut_type  # used in blocks
    _cardinality = cardinality  # used in ResNeXts
    _dropout = dropout  # used in Wide ResNets
    _preact_shortcuts = preact_shortcuts
    block_types = {'preactivated': preactivation_block,
                   'bootleneck': bootleneck_block,
                   'original': original_block}
    selected_block = block_types[block_type]
    inputs = tf.keras.layers.Input(shape=input_shape)
    flow = regularized_padded_conv(**first_conv)(inputs)
    # Original (post-activation) nets activate right after the stem conv;
    # pre-activation nets instead apply BN-ReLU once after all groups.
    if block_type == 'original':
        flow = bn_relu(flow)
    for block_idx, (group_size, feature, stride) in enumerate(zip(group_sizes, features, strides)):
        flow = group_of_blocks(flow,
                               block_type=selected_block,
                               num_blocks=group_size,
                               block_idx=block_idx,
                               filters=feature,
                               stride=stride)
    if block_type != 'original':
        flow = bn_relu(flow)
    flow = tf.keras.layers.GlobalAveragePooling2D()(flow)
    # Optional custom initializers for the classifier head (e.g. warm starts).
    if final_dense_kernel_initializer is not None:
        assert final_dense_bias_initializer is not None, 'make sure kernel and bias initializer is not None at the same time'
        outputs = tf.keras.layers.Dense(n_classes, kernel_regularizer=_regularizer,
                                        kernel_initializer=final_dense_kernel_initializer,
                                        bias_initializer=final_dense_bias_initializer)(flow)
    else:
        outputs = tf.keras.layers.Dense(n_classes, kernel_regularizer=_regularizer)(flow)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return model
def load_weights_func(model, model_name):
    """Best-effort restore of weights from saved_models/<model_name>.tf.

    The model is returned unchanged when no checkpoint is found.
    """
    weight_path = os.path.join('saved_models', model_name + '.tf')
    try:
        model.load_weights(weight_path)
    except tf.errors.NotFoundError:
        print("No weights found for this model!")
    return model
def cifar_resnet20(block_type='original', shortcut_type='A', l2_reg=1e-4, load_weights=False, input_shape=None, n_classes=None):
    """ResNet-20 for CIFAR: 3 groups of 3 blocks, 16/32/64 features."""
    model = Resnet(input_shape=input_shape,
                   n_classes=n_classes,
                   l2_reg=l2_reg,
                   group_sizes=(3, 3, 3),
                   features=(16, 32, 64),
                   strides=(1, 2, 2),
                   first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type=block_type,
                   preact_shortcuts=False)
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet20')
    return model
def cifar_resnet32(block_type='original', shortcut_type='A', l2_reg=1e-4, load_weights=False, input_shape=None):
    """ResNet-32 for CIFAR-10: 3 groups of 5 blocks, 16/32/64 features."""
    model = Resnet(input_shape=input_shape,
                   n_classes=10,
                   l2_reg=l2_reg,
                   group_sizes=(5, 5, 5),
                   features=(16, 32, 64),
                   strides=(1, 2, 2),
                   first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type=block_type,
                   preact_shortcuts=False)
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet32')
    return model
def cifar_resnet44(block_type='original', shortcut_type='A', l2_reg=1e-4, load_weights=False, input_shape=None):
    """ResNet-44 for CIFAR-10: 3 groups of 7 blocks, 16/32/64 features."""
    model = Resnet(input_shape=input_shape,
                   n_classes=10,
                   l2_reg=l2_reg,
                   group_sizes=(7, 7, 7),
                   features=(16, 32, 64),
                   strides=(1, 2, 2),
                   first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type=block_type,
                   preact_shortcuts=False)
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet44')
    return model
def cifar_resnet56(block_type='original', shortcut_type='A', l2_reg=1e-4, load_weights=False, input_shape=None):
    """ResNet-56 for CIFAR-10: 3 groups of 9 blocks, 16/32/64 features."""
    model = Resnet(input_shape=input_shape,
                   n_classes=10,
                   l2_reg=l2_reg,
                   group_sizes=(9, 9, 9),
                   features=(16, 32, 64),
                   strides=(1, 2, 2),
                   first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type=block_type,
                   preact_shortcuts=False)
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet56')
    return model
def cifar_resnet110(block_type='preactivated', shortcut_type='B', l2_reg=1e-4, load_weights=False, input_shape=None):
    """ResNet-110 for CIFAR-10: 3 groups of 18 blocks, 16/32/64 features."""
    model = Resnet(input_shape=input_shape,
                   n_classes=10,
                   l2_reg=l2_reg,
                   group_sizes=(18, 18, 18),
                   features=(16, 32, 64),
                   strides=(1, 2, 2),
                   first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type=block_type,
                   preact_shortcuts=False)
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet110')
    return model
def cifar_resnet164(shortcut_type='B', load_weights=False, l2_reg=1e-4, input_shape=None):
    """ResNet-164 (pre-act bottleneck) for CIFAR-10: 3x18 blocks, 64/128/256."""
    model = Resnet(input_shape=input_shape,
                   n_classes=10,
                   l2_reg=l2_reg,
                   group_sizes=(18, 18, 18),
                   features=(64, 128, 256),
                   strides=(1, 2, 2),
                   first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type='bootleneck',
                   preact_shortcuts=True)
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet164')
    return model
def cifar_resnet1001(shortcut_type='B', load_weights=False, l2_reg=1e-4, input_shape=None):
    """ResNet-1001 (pre-act bottleneck) for CIFAR-10: 3x111 blocks, 64/128/256."""
    model = Resnet(input_shape=input_shape,
                   n_classes=10,
                   l2_reg=l2_reg,
                   group_sizes=(111, 111, 111),
                   features=(64, 128, 256),
                   strides=(1, 2, 2),
                   first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type='bootleneck',
                   preact_shortcuts=True)
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet1001')
    return model
def cifar_wide_resnet(N, K, block_type='preactivated', shortcut_type='B', dropout=0, l2_reg=2.5e-4, n_classes=None, preact_shortcuts=False, input_shape=None):
    """Wide ResNet WRN-N-K: depth N (total conv layers), width multiplier K."""
    assert (N - 4) % 6 == 0, "N-4 has to be divisible by 6"
    # N counts every conv layer; the stem and the two convs per basic block
    # across three groups give (N - 4) / 6 blocks per group.
    layers_per_group = (N - 4) // 6
    model = Resnet(input_shape=input_shape,
                   n_classes=n_classes,
                   l2_reg=l2_reg,
                   group_sizes=(layers_per_group,) * 3,
                   features=(16 * K, 32 * K, 64 * K),
                   strides=(1, 2, 2),
                   first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type=block_type,
                   dropout=dropout,
                   preact_shortcuts=preact_shortcuts)
    return model
def cifar_WRN_16_4(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, input_shape=None):
    """WRN-16-4; optionally restores weights from saved_models/."""
    model = cifar_wide_resnet(16, 4, 'preactivated', shortcut_type, dropout=dropout, l2_reg=l2_reg, input_shape=input_shape)
    if load_weights:
        model = load_weights_func(model, 'cifar_WRN_16_4')
    return model
def cifar_WRN_40_4(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, input_shape=None):
    """WRN-40-4; optionally restores weights from saved_models/."""
    model = cifar_wide_resnet(40, 4, 'preactivated', shortcut_type, dropout=dropout, l2_reg=l2_reg, input_shape=input_shape)
    if load_weights:
        model = load_weights_func(model, 'cifar_WRN_40_4')
    return model
def cifar_WRN_16_8(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, input_shape=None):
    """WRN-16-8; optionally restores weights from saved_models/."""
    model = cifar_wide_resnet(16, 8, 'preactivated', shortcut_type, dropout=dropout, l2_reg=l2_reg, input_shape=input_shape)
    if load_weights:
        model = load_weights_func(model, 'cifar_WRN_16_8')
    return model
def cifar_WRN_28_10(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, n_classes=None, preact_shortcuts=False, input_shape=None):
    """WRN-28-10. NOTE(review): `load_weights` is accepted but currently ignored."""
    return cifar_wide_resnet(28, 10, 'preactivated', shortcut_type,
                             dropout=dropout, l2_reg=l2_reg, n_classes=n_classes,
                             preact_shortcuts=preact_shortcuts, input_shape=input_shape)
def cifar_WRN_28_2(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, n_classes=None, preact_shortcuts=False, input_shape=None):
    """WRN-28-2. NOTE(review): `load_weights` is accepted but currently ignored."""
    return cifar_wide_resnet(28, 2, 'preactivated', shortcut_type,
                             dropout=dropout, l2_reg=l2_reg, n_classes=n_classes,
                             preact_shortcuts=preact_shortcuts, input_shape=input_shape)
def cifar_WRN_40_2(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, n_classes=None, preact_shortcuts=False, input_shape=None):
    """WRN-40-2. NOTE(review): `load_weights` is accepted but currently ignored."""
    return cifar_wide_resnet(40, 2, 'preactivated', shortcut_type,
                             dropout=dropout, l2_reg=l2_reg, n_classes=n_classes,
                             preact_shortcuts=preact_shortcuts, input_shape=input_shape)
def cifar_resnext(N, cardinality, width, shortcut_type='B', ):
    """ResNeXt for CIFAR-10 with the given depth N, cardinality and width.

    Fixes: (1) the assert message now matches the actual (N - 3) % 9 check;
    (2) the unsupported ``width=`` keyword was dropped -- Resnet has no such
    parameter, so every call raised TypeError; width already enters through
    ``features``. NOTE(review): 'resnext' is not registered in Resnet's
    block_types table, so calling this still fails with KeyError until a
    resnext block implementation is added.
    """
    assert (N - 3) % 9 == 0, "N-3 has to be divisible by 9"
    lpb = (N - 3) // 9  # layers per block group
    model = Resnet(input_shape=(32, 32, 3), n_classes=10, l2_reg=1e-4, group_sizes=(lpb, lpb, lpb),
                   features=(16 * width, 32 * width, 64 * width),
                   strides=(1, 2, 2), first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type='resnext', cardinality=cardinality)
    return model
if __name__ == '__main__':
    # Smoke test: build a WRN-28-10 graph for 10 classes (no training).
    model = cifar_WRN_28_10(dropout=0, l2_reg=5e-4/2., preact_shortcuts=False, n_classes=10)
| 12,655
| 49.624
| 183
|
py
|
DeepAA
|
DeepAA-master/utils.py
|
import os
import logging
import numpy as np
import matplotlib
# configure backend here
matplotlib.use('Agg')
# matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from mpl_toolkits.axes_grid1 import ImageGrid
import tensorflow as tf
import math
import sys
from data_generator import CIFAR_MEANS, CIFAR_STDS
gfile = tf.io.gfile
class Logger(object):
    """Tee-style logger: every write is mirrored to STDOUT and to an
    append-mode GFile at ``filepath``, flushing both eagerly so partial
    lines survive a crash."""
    def __init__(self, filepath):
        self.terminal = sys.stdout
        self.log = gfile.GFile(filepath, 'a+')
    def write(self, message):
        # Write then flush each sink in order: terminal first, file second.
        for sink in (self.terminal, self.log):
            sink.write(message)
            sink.flush()
    def flush(self):
        self.terminal.flush()
        self.log.flush()
class CTLEarlyStopping:
    """Early stopping for custom training loops.

    Call :meth:`check_progress` once per epoch with the monitored metric; it
    returns (improvement, stop_training). 'auto' mode treats metrics whose
    name contains 'acc' as increasing and everything else as decreasing.
    Fixes: ``np.Inf`` (removed in NumPy 2.0) replaced with ``np.inf``, and
    the no-op ``min_delta *= 1`` branch dropped.
    """
    def __init__(self,
                 monitor='val_loss',
                 min_delta=0,
                 patience=0,
                 mode='auto',
                 ):
        self.monitor = monitor
        self.patience = patience
        self.min_delta = abs(min_delta)
        self.wait = 0
        self.stop_training = False
        self.improvement = False
        if mode not in ['auto', 'min', 'max']:
            logging.warning('EarlyStopping mode %s is unknown, '
                            'fallback to auto mode.', mode)
            mode = 'auto'
        if mode == 'min':
            self.monitor_op = np.less
        elif mode == 'max':
            self.monitor_op = np.greater
        else:
            # auto: accuracy-like metrics should increase, losses decrease.
            self.monitor_op = np.greater if 'acc' in self.monitor else np.less
        if self.monitor_op == np.less:
            # For decreasing metrics, flip the sign so that
            # `current - min_delta` in check_progress demands an improvement
            # of at least min_delta below the best value.
            self.min_delta = -self.min_delta
        self.best = np.inf if self.monitor_op == np.less else -np.inf
    def check_progress(self, current):
        """Record this epoch's metric; return (improvement, stop_training).

        The patience counter only advances (and stopping is only evaluated)
        on epochs without improvement.
        """
        if self.monitor_op(current - self.min_delta, self.best):
            print(f"{self.monitor} improved from {self.best:.4f} to {current:.4f}.", end=" ")
            self.best = current
            self.wait = 0
            self.improvement = True
        else:
            self.wait += 1
            self.improvement = False
            print(f"{self.monitor} didn't improve")
            if self.wait >= self.patience:
                print("Early stopping")
                self.stop_training = True
        return self.improvement, self.stop_training
##########################################################################################
class CTLHistory:
    """Collects per-epoch train/val metrics plus per-step lr/wd and plots them.

    Bug fix: the plot filename default was chosen inside a try/except whose
    try body (``filename = 'history_cuda.png'``) could never raise, so the
    ``filename`` argument was silently discarded on every call. The argument
    is now honoured, defaulting to 'history.png' when omitted.
    """
    def __init__(self,
                 filename=None,
                 save_dir='plots'):
        # Accuracy entries are stored as whole-number percentages.
        self.history = {'train_loss': [],
                        "train_acc": [],
                        "val_loss": [],
                        "val_acc": [],
                        "lr": [],
                        "wd": []}
        self.save_dir = save_dir
        if not os.path.exists(self.save_dir):
            os.mkdir(self.save_dir)
        filename = 'history.png' if filename is None else filename
        self.plot_name = os.path.join(self.save_dir, filename)
    def update(self, train_stats, val_stats, record_lr_wd):
        """Append one epoch of (loss, acc) pairs and the step-wise lr/wd traces."""
        train_loss, train_acc = train_stats
        val_loss, val_acc = val_stats
        lr_history, wd_history = record_lr_wd
        self.history['train_loss'].append(train_loss)
        self.history['train_acc'].append(np.round(train_acc*100))
        self.history['val_loss'].append(val_loss)
        self.history['val_acc'].append(np.round(val_acc*100))
        # lr/wd are recorded per optimizer step, not per epoch.
        self.history['lr'].extend(lr_history)
        self.history['wd'].extend(wd_history)
    def plot_and_save(self, initial_epoch=0):
        """Render loss/accuracy/lr-wd curves and write them to ``self.plot_name``."""
        train_loss = self.history['train_loss']
        train_acc = self.history['train_acc']
        val_loss = self.history['val_loss']
        val_acc = self.history['val_acc']
        epochs = [(i+initial_epoch) for i in range(len(train_loss))]
        f, ax = plt.subplots(3, 1, figsize=(15, 8))
        ax[0].plot(epochs, train_loss)
        ax[0].plot(epochs, val_loss)
        ax[0].set_title('loss progression')
        ax[0].set_xlabel('Epochs')
        ax[0].set_ylabel('loss values')
        ax[0].legend(['train', 'test'])
        ax[1].plot(epochs, train_acc)
        ax[1].plot(epochs, val_acc)
        ax[1].set_title('accuracy progression')
        ax[1].set_xlabel('Epochs')
        ax[1].set_ylabel('Accuracy')
        ax[1].legend(['train', 'test'])
        # lr/wd are per-step; rescale their x axis to epoch units.
        steps = len(self.history['lr'])
        bs = steps/len(train_loss)
        ax[2].plot([s/bs for s in range(steps)], self.history['lr'])
        ax[2].plot([s/bs for s in range(steps)], self.history['wd'])
        ax[2].set_title('learning rate and weight decay')
        ax[2].set_xlabel('Epochs')
        ax[2].set_ylabel('lr and wd')
        ax[2].legend(['lr', 'wd'])
        plt.savefig(self.plot_name)
        plt.close()
def repeat(x, n, axis):
    """``np.repeat`` generalized to (possibly nested) python lists."""
    if isinstance(x, np.ndarray):
        return np.repeat(x, n, axis=axis)
    if isinstance(x, list):
        return repeat_list(x, n, axis)
    raise Exception('Unsupport data type {}'.format(type(x)))
def repeat_list(x, n, axis):
    """List analogue of ``np.repeat``: duplicate each element ``n`` times
    along ``axis`` (recursing into elements for axis >= 1).

    Bug fix: the recursive branch previously required ``axis > 1``, so
    axis == 1 (and therefore every axis >= 1 once the recursion bottomed
    out) fell through to the bare ``raise``; the branch now accepts
    ``axis >= 1`` and recurses with ``axis - 1``, matching np.repeat.
    """
    assert isinstance(x, list), 'Can only consume list type'
    if axis == 0:
        return sum([[item] * n for item in x], [])
    if axis >= 1:
        return [repeat(item, n, axis=axis - 1) for item in x]
    raise Exception
def tile(x):
    # Placeholder stub: tiling is not implemented; always returns None.
    return None
| 5,620
| 29.548913
| 93
|
py
|
DeepAA
|
DeepAA-master/policy.py
|
import tensorflow as tf
import numpy as np
import math
import json
from tensorflow_probability import distributions as tfd
from resnet import Resnet
CIFAR_MEANS = np.array([0.49139968, 0.48215841, 0.44653091], dtype=np.float32)
CIFAR_STDS = np.array([0.2023, 0.1994, 0.2010], dtype=np.float32)
SVHN_MEANS = np.array([0.4379, 0.4440, 0.4729], dtype=np.float32)
SVHN_STDS = np.array([0.1980, 0.2010, 0.1970], dtype=np.float32)
IMAGENET_MEANS = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STDS = np.array([0.229, 0.224, 0.225], dtype=np.float32)
class DA_Policy_logits(tf.keras.Model):
    """Trainable categorical distribution over discrete augmentation policies.

    A single trainable logits vector (one entry per element of
    ``available_policies``) parameterizes the sampling distribution; each
    policy indexes a row of the deduplicated (op, magnitude) table built by
    ``_get_unique_policy``. ``sample`` draws per-image policies; ``probs``
    exposes the per-image policy probabilities.
    """
    def __init__(self, l_ops, l_mags, l_uniq, op_names, ops_mid_magnitude,
                 N_repeat_random, available_policies, policy_init='identity'):
        super().__init__()
        self.l_uniq = l_uniq
        self.l_ops = l_ops
        self.l_mags = l_mags
        self.N_repeat_random = N_repeat_random
        self.available_policies = available_policies
        if policy_init == 'uniform':
            init_value = tf.constant([0.0]*len(available_policies), dtype=tf.float32)
        elif policy_init == 'identity':
            # Concentrate initial mass on the first (identity) policy;
            # mean-centering keeps the logits well scaled.
            init_value = tf.constant([8.0] + [0.0]*(len(available_policies)-1), dtype=tf.float32)
            init_value = init_value - tf.reduce_mean(init_value)
        else:
            raise Exception
        self.logits = tf.Variable(initial_value=init_value, trainable=True)
        self.ops_mid_magnitude = ops_mid_magnitude
        self.unique_policy = self._get_unique_policy(op_names, l_ops, l_mags)
        self.N_random, self.repeat_cfg, self.reduce_random_mat = self._get_repeat_random(op_names, l_ops, l_mags,
                                                                                        l_uniq, N_repeat_random)
        self.act = tf.nn.softmax
    def sample(self, images_orig, images, onehot_ops_mags, augNum):
        """Draw ``augNum`` policies per image.

        Returns (ops, mags, sampled_policy_indices, sampled_probs).
        """
        bs = len(images_orig)
        probs = self.act(self.logits, axis=-1)
        dist = tfd.Categorical(probs=probs)
        samples_om = dist.sample(augNum*bs).numpy()  # (augNum * bs,)
        ops_dense, mags_dense, reduce_random_mat, ops_mags_idx, probs, probs_exp = self.get_dense_aug(images, repeat_random_ops=False)
        ops = ops_dense[samples_om]
        mags = mags_dense[samples_om]
        ops_mags_idx_sample = ops_mags_idx[samples_om]
        probs_sample = probs.numpy()[samples_om]
        return ops, mags, ops_mags_idx_sample, probs_sample
    def probs(self, images_orig, images, onehot_ops_mags, training):
        """Per-image policy probabilities, shape (batch, n_policies).

        The distribution is image-independent; the softmax row is tiled.
        """
        bs = len(images_orig)
        probs = self.act(self.logits, axis=-1)
        probs = tf.repeat(probs[tf.newaxis], bs, axis=0)
        return probs
    def get_dense_aug(self, images, repeat_random_ops):
        """Materialize the (ops, mags) table restricted to available_policies.

        When ``repeat_random_ops`` is set, policies containing stochastic ops
        are duplicated N_repeat_random times and ``reduce_random_mat``
        averages the duplicated columns back to one column per policy.
        """
        ops_uniq, mags_uniq = self.unique_policy
        ops_dense = np.squeeze(ops_uniq)[self.available_policies]
        mags_dense = np.squeeze(mags_uniq)[self.available_policies]
        ops_mags_idx = self.available_policies
        if repeat_random_ops:
            isRepeat = [np.any(np.array(ops_dense == repeat_op_idx), axis=1) for repeat_op_idx in self.repeat_ops_idx]
            isRepeat = np.stack(isRepeat, axis=1)
            isRepeat = np.any(isRepeat, axis=1)
            nRepeat = [self.N_repeat_random if isrepeat else 1 for isrepeat in isRepeat]
            ops_dense = np.repeat(ops_dense, nRepeat, axis=0)
            mags_dense = np.repeat(mags_dense, nRepeat, axis=0)
            reduce_random_mat = np.eye(len(self.available_policies)) / np.array(nRepeat, dtype=np.float32)
            reduce_random_mat = np.repeat(reduce_random_mat, nRepeat, axis=1)
        else:
            nRepeat = [1] * len(self.available_policies)
            reduce_random_mat = np.eye(len(self.available_policies))
        probs = self.act(self.logits)
        # Split each repeated policy's probability evenly over its copies.
        probs_exp = np.repeat(probs/np.array(nRepeat, dtype=np.float32), nRepeat, axis=0)
        return ops_dense, mags_dense, reduce_random_mat, ops_mags_idx, probs, probs_exp
    def _get_unique_policy(self, op_names, l_ops, l_mags):
        """Build the deduplicated (op, magnitude) table.

        Ops with an in-range mid magnitude drop their identity level; ops
        with a negative mid magnitude keep all levels; parameter-free and
        stochastic ('random') ops contribute a single middle-level entry.
        Also records ``self.repeat_ops_idx`` (indices of stochastic ops).
        """
        names_modified = [op_name.split(':')[0] for op_name in op_names]
        ops_list, mags_list = [], []
        repeat_ops_idx = []
        for k_name, name in enumerate(names_modified):
            if self.ops_mid_magnitude[name] == 'random':
                repeat_ops_idx.append(k_name)
                ops_sub, mags_sub = np.array([[k_name]], dtype=np.int32), np.array([[(l_mags - 1) // 2]], dtype=np.int32)
            elif self.ops_mid_magnitude[name] is not None and self.ops_mid_magnitude[name]>=0 and self.ops_mid_magnitude[name]<=l_mags-1:
                ops_sub = k_name * np.ones([l_mags - 1, 1], dtype=np.int32)
                mags_sub = np.array([l for l in range(l_mags) if l != self.ops_mid_magnitude[name]], dtype=np.int32)[:, np.newaxis]
            elif self.ops_mid_magnitude[name] is not None and self.ops_mid_magnitude[name]<0:
                ops_sub = k_name * np.ones([l_mags, 1], dtype=np.int32)
                mags_sub = np.arange(l_mags, dtype=np.int32)[:, np.newaxis]
            elif self.ops_mid_magnitude[name] is None:
                ops_sub, mags_sub = np.array([[k_name]], dtype=np.int32), np.array([[(l_mags - 1) // 2]], dtype=np.int32)
            else:
                raise Exception('Unrecognized middle magnitude')
            ops_list.append(ops_sub)
            mags_list.append(mags_sub)
        ops = np.concatenate(ops_list, axis=0)
        mags = np.concatenate(mags_list, axis=0)
        self.repeat_ops_idx = repeat_ops_idx
        return ops.astype(np.int32), mags.astype(np.int32)
    def _get_repeat_random(self, op_names, l_ops, l_mags, l_uniq, N_repeat_random):
        """Per-unique-policy repetition counts for stochastic ops, plus the
        matrix that averages the repeated columns back to ``l_uniq`` columns.

        Fixes: (1) the mid-magnitude == -1 branch used ``append`` on a list,
        making ``repeat_cfg`` ragged so np.array(..., np.int32) could not
        build a flat vector -- it must extend by one count per magnitude
        level, matching _get_unique_policy; (2) ``np.float`` was removed in
        NumPy >= 1.24, so the cast now uses ``np.float64`` (the old alias's
        precision).
        """
        names_modified = [op_name.split(':')[0] for op_name in op_names]
        N_random = sum([1 for name in names_modified if self.ops_mid_magnitude[name]=='random'])
        repeat_cfg = []
        for k_name, name in enumerate(names_modified):
            if self.ops_mid_magnitude[name] == 'random':
                repeat_cfg.append(N_repeat_random)  # we may repeat random operations for N_repeat_random times
            elif self.ops_mid_magnitude[name] is not None and self.ops_mid_magnitude[name] == -1:
                repeat_cfg.extend([1]*l_mags)
            elif self.ops_mid_magnitude[name] is not None and self.ops_mid_magnitude[name] >= 0 and self.ops_mid_magnitude[name]<=l_mags-1:
                repeat_cfg.extend([1]*(l_mags-1))
            elif self.ops_mid_magnitude[name] is None:
                repeat_cfg.append(1)
            else:
                raise Exception
        repeat_cfg = np.array(repeat_cfg, dtype=np.int32)
        reduce_mat = np.eye(l_uniq)/repeat_cfg[np.newaxis].astype(np.float64)
        reduce_mat = np.repeat(reduce_mat, repeat_cfg, axis=1)
        return N_random, repeat_cfg, reduce_mat
    @property
    def idx_removed_redundant(self):
        """Indices selecting one representative column per unique policy."""
        idx_removed_redundant = np.concatenate([[1] if rep == 1 else [1]+[0]*(rep-1) for rep in self.repeat_cfg ]).nonzero()[0]
        assert len(idx_removed_redundant) == self.l_uniq, 'removing the repeated random operations'
        return idx_removed_redundant
| 7,142
| 51.138686
| 139
|
py
|
DeepAA
|
DeepAA-master/aug_lib.py
|
import numpy as np
import re
import PIL
from PIL import ImageOps, ImageEnhance, ImageFilter, Image, ImageDraw
import random
from dataclasses import dataclass, field
from typing import Union
@dataclass
class MinMax:
    """Closed interval [min, max] for one augmentation parameter."""
    min: Union[float, int]
    max: Union[float, int]
@dataclass
class MinMaxVals:
    """Per-op magnitude ranges.

    Fix: the defaults used to be MinMax instances evaluated once at class
    definition time -- every MinMaxVals shared the same mutable objects, so
    editing one instance's range leaked into all others, and on Python 3.11+
    unhashable dataclass defaults raise ValueError at class creation. Each
    field now uses default_factory so every instance gets fresh MinMax
    objects with the same values.
    """
    shear: MinMax = field(default_factory=lambda: MinMax(.0, .3))
    translate: MinMax = field(default_factory=lambda: MinMax(0, 10))  # different from uniaug: MinMax(0,14.4)
    rotate: MinMax = field(default_factory=lambda: MinMax(0, 30))
    solarize: MinMax = field(default_factory=lambda: MinMax(0, 256))
    posterize: MinMax = field(default_factory=lambda: MinMax(0, 4))  # different from uniaug: MinMax(4,8)
    enhancer: MinMax = field(default_factory=lambda: MinMax(.1, 1.9))
    cutout: MinMax = field(default_factory=lambda: MinMax(.0, .2))
def float_parameter(level, maxval):
    """Linearly rescale ``level`` from [0, PARAMETER_MAX] to a float in
    [0, maxval].

    Args:
      level: Level of the operation, in [0, PARAMETER_MAX].
      maxval: Maximum value the operation can take.
    Returns:
      The proportionally scaled float value.
    """
    return float(level) * maxval / PARAMETER_MAX
def int_parameter(level, maxval):
    """Helper function to scale `val` between 0 and maxval .
    Args:
      level: Level of the operation that will be between [0, `PARAMETER_MAX`].
      maxval: Maximum value that the operation can have. This will be scaled
        to level/PARAMETER_MAX.
    Returns:
      An int that results from scaling `maxval` according to `level`
      (truncated toward zero by int()).
    """
    return int(level * maxval / PARAMETER_MAX)
class TransformFunction(object):
    """A callable transform paired with a display name for pretty printing."""

    def __init__(self, func, name):
        self.f = func
        self.name = name

    def __repr__(self):
        return '<{}>'.format(self.name)

    def __call__(self, pil_img):
        return self.f(pil_img)
class TransformT(object):
    """A named augmentation: wraps an ``(image, level) -> image`` function."""

    def __init__(self, name, xform_fn):
        self.name = name
        self.xform = xform_fn

    def __repr__(self):
        return '<{}>'.format(self.name)

    def pil_transformer(self, probability, level):
        """Bind `probability` and `level`, returning a TransformFunction."""
        tag = self.name + '({:.1f},{})'.format(probability, level)

        def apply_maybe(im):
            # Coin flip per call: apply the transform with the bound probability.
            if random.random() < probability:
                im = self.xform(im, level)
            return im

        return TransformFunction(apply_maybe, tag)
################## Transform Functions ##################
# Level-independent transforms: each lambda below ignores `level`.
identity = TransformT('identity', lambda pil_img, level: pil_img)
flip_lr = TransformT(
    'FlipLR',
    lambda pil_img, level: pil_img.transpose(Image.FLIP_LEFT_RIGHT))
flip_ud = TransformT(
    'FlipUD',
    lambda pil_img, level: pil_img.transpose(Image.FLIP_TOP_BOTTOM))
# pylint:disable=g-long-lambda
auto_contrast = TransformT(
    'AutoContrast',
    lambda pil_img, level: ImageOps.autocontrast(
        pil_img))
equalize = TransformT(
    'Equalize',
    lambda pil_img, level: ImageOps.equalize(
        pil_img))
invert = TransformT(
    'Invert',
    lambda pil_img, level: ImageOps.invert(
        pil_img))
# pylint:enable=g-long-lambda
blur = TransformT(
    'Blur', lambda pil_img, level: pil_img.filter(ImageFilter.BLUR))
smooth = TransformT(
    'Smooth',
    lambda pil_img, level: pil_img.filter(ImageFilter.SMOOTH))
def _rotate_impl(pil_img, level):
    """Rotate by up to ±min_max_vals.rotate.max degrees; sign is a coin flip."""
    magnitude = int_parameter(level, min_max_vals.rotate.max)
    sign = -1 if random.random() > 0.5 else 1
    return pil_img.rotate(sign * magnitude)


rotate = TransformT('Rotate', _rotate_impl)
def _posterize_impl(pil_img, level):
    """Posterize: higher `level` keeps fewer bits per channel."""
    span = min_max_vals.posterize.max - min_max_vals.posterize.min
    bits = min_max_vals.posterize.max - int_parameter(level, span)
    return ImageOps.posterize(pil_img, bits)


posterize = TransformT('Posterize', _posterize_impl)
def _shear_x_impl(pil_img, level):
    """Shear along the horizontal axis by a random-signed factor.

    `level` in [0, PARAMETER_MAX] is scaled into
    [0, min_max_vals.shear.max]; the sign is a coin flip.
    """
    factor = float_parameter(level, min_max_vals.shear.max)
    if random.random() > 0.5:
        factor = -factor
    affine = (1, factor, 0, 0, 1, 0)
    return pil_img.transform(pil_img.size, Image.AFFINE, affine)


shear_x = TransformT('ShearX', _shear_x_impl)
def _shear_y_impl(pil_img, level):
    """Shear along the vertical axis by a random-signed factor.

    `level` in [0, PARAMETER_MAX] is scaled into
    [0, min_max_vals.shear.max]; the sign is a coin flip.
    """
    factor = float_parameter(level, min_max_vals.shear.max)
    if random.random() > 0.5:
        factor = -factor
    affine = (1, 0, 0, factor, 1, 0)
    return pil_img.transform(pil_img.size, Image.AFFINE, affine)


shear_y = TransformT('ShearY', _shear_y_impl)
def _translate_x_impl(pil_img, level):
    """Translate horizontally by a random-signed pixel offset.

    `level` is scaled into [0, min_max_vals.translate.max] pixels;
    the direction is a coin flip.
    """
    offset = int_parameter(level, min_max_vals.translate.max)
    if random.random() > 0.5:
        offset = -offset
    affine = (1, 0, offset, 0, 1, 0)
    return pil_img.transform(pil_img.size, Image.AFFINE, affine)


translate_x = TransformT('TranslateX', _translate_x_impl)
def _translate_y_impl(pil_img, level):
    """Translate vertically by a random-signed pixel offset.

    `level` is scaled into [0, min_max_vals.translate.max] pixels;
    the direction is a coin flip.
    """
    offset = int_parameter(level, min_max_vals.translate.max)
    if random.random() > 0.5:
        offset = -offset
    affine = (1, 0, 0, 0, 1, offset)
    return pil_img.transform(pil_img.size, Image.AFFINE, affine)


translate_y = TransformT('TranslateY', _translate_y_impl)
def _crop_impl(pil_img, level, interpolation=Image.BILINEAR):
    """Crop `level` pixels (scaled to [0, 10]) from each side, resize back up."""
    margin = int_parameter(level, 10)
    width, height = pil_img.width, pil_img.height
    box = (margin, margin, width - margin, height - margin)
    return pil_img.crop(box).resize((width, height), interpolation)


crop_bilinear = TransformT('CropBilinear', _crop_impl)
def _solarize_impl(pil_img, level):
    """Solarize: invert all pixels above a threshold that shrinks with `level`."""
    threshold = 256 - int_parameter(level, min_max_vals.solarize.max)
    return ImageOps.solarize(pil_img, threshold)


solarize = TransformT('Solarize', _solarize_impl)
def _enhancer_impl(enhancer, minimum=None, maximum=None):
    """Build an ``(img, level) -> img`` wrapper around a PIL ImageEnhance class.

    The strength is scaled from `level` into [lo, hi]; bounds default to
    ``min_max_vals.enhancer`` when not supplied.
    """
    def impl(pil_img, level):
        lo = min_max_vals.enhancer.min if minimum is None else minimum
        hi = min_max_vals.enhancer.max if maximum is None else maximum
        # The lower bound keeps strength away from 0, which destroys the image.
        strength = float_parameter(level, hi - lo) + lo
        return enhancer(pil_img).enhance(strength)
    return impl
# for stacked_TA_cifar
def _mean_pad_randcrop(img, v):
    """Pad `img` by `v` pixels per side with the mean colour (125, 123, 114),
    then take a random crop back to the original size."""
    # v: Pad with mean value=[125, 123, 114] by v pixels on each side and then take random crop
    assert v <= 10, 'The maximum shift should be less then 10'
    padded_size = (img.size[0] + 2*v, img.size[1] + 2*v)
    new_img = Image.new('RGB', padded_size, color=(125, 123, 114))
    new_img.paste(img, (v, v))
    # Crop origin is uniform over all 2v+1 legal offsets per axis.
    top = random.randint(0, v*2)
    left = random.randint(0, v*2)
    new_img = new_img.crop((left, top, left + img.size[0], top + img.size[1]))
    return new_img
# Enhancer-based transforms (strength scaled by _enhancer_impl).
color = TransformT('Color', _enhancer_impl(ImageEnhance.Color))
ohl_color = TransformT('Color', _enhancer_impl(ImageEnhance.Color, .3, .9))  # narrower range for the OHL space
contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast))
brightness = TransformT('Brightness', _enhancer_impl(
    ImageEnhance.Brightness))
sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness))
# Level-independent PIL filters.
contour = TransformT(
    'Contour', lambda pil_img, level: pil_img.filter(ImageFilter.CONTOUR))
detail = TransformT(
    'Detail', lambda pil_img, level: pil_img.filter(ImageFilter.DETAIL))
edge_enhance = TransformT(
    'EdgeEnhance', lambda pil_img, level: pil_img.filter(ImageFilter.EDGE_ENHANCE))
sharpen = TransformT(
    'Sharpen', (lambda pil_img, level: pil_img.filter(ImageFilter.SHARPEN)))
max_ = TransformT(
    'Max', lambda pil_img, level: pil_img.filter(ImageFilter.MaxFilter))
min_ = TransformT(
    'Min', lambda pil_img, level: pil_img.filter(ImageFilter.MinFilter))
median = TransformT(
    'Median', lambda pil_img, level: pil_img.filter(ImageFilter.MedianFilter))
gaussian = TransformT(
    'Gaussian', lambda pil_img, level: pil_img.filter(ImageFilter.GaussianBlur))
def _mirrored_enhancer_impl(enhancer, minimum=None, maximum=None):
    """Like _enhancer_impl, but the strength space is mirrored around 1.0.

    Requires a lower bound of 0; the scaled offset receives a random sign
    and is applied as ``enhance(1 + offset)``.
    """
    def impl(pil_img, level):
        lo = min_max_vals.enhancer.min if minimum is None else minimum
        hi = min_max_vals.enhancer.max if maximum is None else maximum
        assert lo == 0., "This enhancer is used with a strength space that is mirrored around one."
        offset = float_parameter(level, hi - lo) + lo
        if random.random() < .5:
            offset = -offset
        return enhancer(pil_img).enhance(1. + offset)
    return impl
# Mirrored variants: strength is 1 ± offset with a random sign (see _mirrored_enhancer_impl).
mirrored_color = TransformT('Color', _mirrored_enhancer_impl(ImageEnhance.Color))
mirrored_contrast = TransformT('Contrast', _mirrored_enhancer_impl(ImageEnhance.Contrast))
mirrored_brightness = TransformT('Brightness', _mirrored_enhancer_impl(
    ImageEnhance.Brightness))
mirrored_sharpness = TransformT('Sharpness', _mirrored_enhancer_impl(ImageEnhance.Sharpness))
def CutoutDefault(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Black out a roughly v-by-v square at a uniformly random position.

    The box is clipped to the image bounds, so the visible cutout can be
    smaller near the borders. Returns `img` untouched when v <= 0.
    """
    if v <= 0:
        return img
    width, height = img.size
    cx = np.random.uniform(width)
    cy = np.random.uniform(height)
    left = int(max(0, cx - v / 2.))
    top = int(max(0, cy - v / 2.))
    box = (left, top, min(width, left + v), min(height, top + v))
    patched = img.copy()
    ImageDraw.Draw(patched).rectangle(box, (0, 0, 0))
    return patched
def RandCutout(img, v): # Used in FastAA, different from CutoutABS, the actual cutout size can be smaller than v on the boundary
    """Cut a mean-colour (125, 123, 114) square of side ~`v` at a random spot.

    Small-`v` path (v <= 16) asserts a 32x32 image (CIFAR/SVHN). The large-`v`
    path applies the cutout only to images wider or taller than 120px, keeping
    the cutout centre inside the central 256x256 box; smaller images are
    returned unchanged.
    """
    # Passed random number generation test
    # assert 0 <= v <= 20
    # NOTE(review): guard is `v < 0` here but `v <= 0` in CutoutDefault —
    # confirm v == 0 drawing a degenerate box is intended.
    if v < 0:
        return img
    w, h = img.size
    # x = np.random.uniform(w)
    # y = np.random.uniform(h)
    if v <= 16: # for cutout of cifar and SVHN
        assert w == h == 32
        x = random.uniform(0, w)
        y = random.uniform(0, h)
        x0 = int(min(w, max(0, x - v // 2))) # clip to the range (0, w)
        x1 = int(min(w, max(0, x + v // 2)))
        y0 = int(min(h, max(0, y - v // 2)))
        y1 = int(min(h, max(0, y + v // 2)))
        xy = (x0, y0, x1, y1)
        color = (125, 123, 114)
        # color = (0, 0, 0)
        img = img.copy()
        PIL.ImageDraw.Draw(img).rectangle(xy, color)
        # img = CutoutAbs(img, v)
        return img
    else:
        IMAGENET_SIZE_resize = (256, 256)
        if w>120 or h>120:
            # make sure that the center of cutout is within the center (256, 256) box
            x_left = max(0, w//2-IMAGENET_SIZE_resize[0]//2)
            x_right = min(w, w//2+IMAGENET_SIZE_resize[0]//2)
            y_bottom = max(0, h//2-IMAGENET_SIZE_resize[1]//2)
            y_top = min(h, h//2+IMAGENET_SIZE_resize[1]//2)
            x = random.uniform(x_left, x_right)
            y = random.uniform(y_bottom, y_top)
            x0 = int(min(w, max(0, x - v // 2))) # clip to the range (0, w)
            x1 = int(min(w, max(0, x + v // 2)))
            y0 = int(min(h, max(0, y - v // 2)))
            y1 = int(min(h, max(0, y + v // 2)))
            xy = (x0, y0, x1, y1)
            color = (125, 123, 114)
            # color = (0, 0, 0)
            img = img.copy()
            PIL.ImageDraw.Draw(img).rectangle(xy, color)
            return img
        # Images no larger than 120px in both dimensions fall through untouched.
        return img
# Cutout size is a fraction of the image width (min_max_vals.cutout.max), scaled by level.
cutout = TransformT('Cutout',
                    lambda img, l: CutoutDefault(img, int_parameter(l, img.size[0] * min_max_vals.cutout.max)))
# for stacked_TA_cifar
mean_pad4_randcrop = TransformT('RandCrop', lambda img, l: _mean_pad_randcrop(img, 4))
# cutout16 = TransformT('Cutout', lambda img, l: CutoutDefault(img, 16))
# cutout60 = TransformT('Cutout', lambda img, l: CutoutDefault(img, 60))
cutout16 = TransformT('Cutout', lambda img, l: RandCutout(img, 16))
cutout60 = TransformT('Cutout', lambda img, l: RandCutout(img, 60))
flip_lr_stackedTA = TransformT('flip_lr', lambda img, l: PIL.ImageOps.mirror(img))
blend_images = None  # pool of PIL images for SamplePairing; must be set by the caller before use
def blend(img1, v):
    """Blend `img1` with a randomly chosen image from `blend_images`.

    The blend weight is scaled from `v` into [0, 0.4].

    Raises:
        RuntimeError: if `blend_images` has not been set. Previously this
            case only printed a warning and then crashed with a confusing
            ``TypeError`` on ``len(None)``; now it fails loudly up front.
    """
    if blend_images is None:
        raise RuntimeError("please set google_transformations.blend_images before using the enlarged_randaug search space.")
    i = np.random.choice(len(blend_images))
    img2 = blend_images[i]
    m = float_parameter(v, .4)
    return Image.blend(img1, img2, m)
sample_pairing = TransformT('SamplePairing', blend)
def set_augmentation_space(augmentation_space, num_strengths, custom_augmentation_space_augs=None):
    """Configure the module-global augmentation search space.

    Sets three globals: ``PARAMETER_MAX`` (= num_strengths - 1, the top of the
    discrete strength scale), ``min_max_vals`` (per-op magnitude ranges chosen
    by substring-matching `augmentation_space`), and ``ALL_TRANSFORMS`` (the
    list of TransformT instances making up the space).

    Args:
        augmentation_space: name of the space; matched by substring, e.g.
            'wide', 'uniaug'/'randaug', 'fixmirror', 'fiximagenet', 'fix',
            'ohl', 'xlong', 'rasubsetofN', 'long', 'autoaug_paper', 'full',
            'custom', 'stacked_TA_cifar', 'standard', or 'Not_used'.
        num_strengths: number of discrete strength levels (> 0).
        custom_augmentation_space_augs: for 'custom' spaces, a list of
            operation names to include (keys of the mapping below).
    """
    global ALL_TRANSFORMS, min_max_vals, PARAMETER_MAX
    assert num_strengths > 0
    PARAMETER_MAX = num_strengths - 1
    # --- choose magnitude ranges (first matching substring wins) ---
    if 'wide' in augmentation_space:
        min_max_vals = MinMaxVals(
            shear=MinMax(.0, .99),
            translate=MinMax(0, 32),
            rotate=MinMax(0, 135),
            solarize=MinMax(0, 256),
            posterize=MinMax(2, 8),
            enhancer=MinMax(.01, 2.),
            cutout=MinMax(.0, .6),
        )
    elif ('uniaug' in augmentation_space) or ('randaug' in augmentation_space):
        min_max_vals = MinMaxVals(
            posterize=MinMax(4, 8),
            translate=MinMax(0, 14.4)
        )
    elif 'fixmirror' in augmentation_space:
        min_max_vals = MinMaxVals(
            posterize=MinMax(4, 8),
            enhancer=MinMax(0., .9)
        )
    elif 'fiximagenet' in augmentation_space:
        min_max_vals = MinMaxVals(
            posterize=MinMax(4, 8),
            translate=MinMax(0, 70)
        )
    elif 'fix' in augmentation_space:
        min_max_vals = MinMaxVals(
            posterize=MinMax(4, 8)
        )
    elif 'ohl' in augmentation_space:
        # OHL space is defined for exactly 3 strengths (PARAMETER_MAX == 2).
        assert PARAMETER_MAX == 2
        min_max_vals = MinMaxVals(
            shear=MinMax(.1, .3),
            translate=MinMax(5, 14),
            rotate=MinMax(10, 30),
            solarize=MinMax(26, 179),
            posterize=MinMax(4, 7),
            enhancer=MinMax(1.3, 1.9),
            cutout=MinMax(.0, .6),
        )
    else:
        min_max_vals = MinMaxVals()
    # --- choose the operation list ---
    if 'xlong' in augmentation_space:
        ALL_TRANSFORMS = [
            identity,
            auto_contrast,
            equalize,
            rotate,
            solarize,
            color,
            posterize,
            contrast,
            brightness,
            sharpness,
            shear_x,
            shear_y,
            translate_x,
            translate_y,
            blur,
            invert,
            flip_lr,
            flip_ud,
            cutout,
            crop_bilinear,
            contour,
            detail,
            edge_enhance,
            sharpen,
            max_,
            min_,
            median,
            gaussian
        ]
    elif 'rasubsetof' in augmentation_space:
        # NOTE(review): samples from the PREVIOUS value of ALL_TRANSFORMS, so
        # the result depends on whichever space was configured before this call.
        r = re.findall(r'rasubsetof(\d+)', augmentation_space)
        assert len(r) == 1
        ALL_TRANSFORMS = random.sample(ALL_TRANSFORMS, int(r[0]))
        print(f"Subsampled {len(ALL_TRANSFORMS)} augs: {ALL_TRANSFORMS}")
    elif 'fixmirror' in augmentation_space:
        ALL_TRANSFORMS = [
            identity,
            auto_contrast,
            equalize,
            rotate,
            solarize,
            mirrored_color, # enhancer
            posterize,
            mirrored_contrast, # enhancer
            mirrored_brightness, # enhancer
            mirrored_sharpness, # enhancer
            shear_x,
            shear_y,
            translate_x,
            translate_y
        ]
    elif 'long' in augmentation_space:
        ALL_TRANSFORMS = [
            identity,
            auto_contrast,
            equalize,
            rotate,
            solarize,
            color,
            posterize,
            contrast,
            brightness,
            sharpness,
            shear_x,
            shear_y,
            translate_x,
            translate_y,
            # sample_pairing,
            blur,
            invert,
            flip_lr,
            flip_ud,
            cutout
        ]
    elif 'uniaug' in augmentation_space:
        ALL_TRANSFORMS = [
            identity,
            shear_x,
            shear_y,
            translate_x,
            translate_y,
            rotate,
            auto_contrast,
            invert, # only uniaug
            equalize,
            solarize,
            posterize,
            contrast,
            color,
            brightness,
            sharpness,
            cutout # only uniaug
        ]
    elif 'autoaug_paper' in augmentation_space:
        ALL_TRANSFORMS = [
            shear_x,
            shear_y,
            translate_x,
            translate_y,
            rotate,
            auto_contrast,
            invert,
            equalize,
            solarize,
            posterize,
            contrast,
            color,
            brightness,
            sharpness,
            cutout,
            sample_pairing
        ]
    elif 'full' in augmentation_space:
        ALL_TRANSFORMS = [
            flip_lr,
            flip_ud,
            auto_contrast,
            equalize,
            invert,
            rotate,
            posterize,
            crop_bilinear,
            solarize,
            color,
            contrast,
            brightness,
            sharpness,
            shear_x,
            shear_y,
            translate_x,
            translate_y,
            cutout,
            blur,
            smooth
        ]
    elif 'ohl' in augmentation_space:
        ALL_TRANSFORMS = [
            shear_x, # ok
            shear_y, # ok
            translate_x, # ok
            translate_y, # ok
            rotate, # ok
            ohl_color, # nok
            posterize, # ok
            solarize, # ok
            contrast, # ok
            sharpness, # ok
            brightness, # ok
            auto_contrast,
            equalize,
            invert
        ]
    elif 'custom' in augmentation_space:
        # Build the space from a caller-supplied list of op names.
        assert custom_augmentation_space_augs is not None
        custom_augmentation_space_augs_mapping = {
            'identity': identity,
            'auto_contrast': auto_contrast,
            'equalize': equalize,
            'rotate': rotate,
            'solarize': solarize,
            'color': color,
            'posterize': posterize,
            'contrast': contrast,
            'brightness': brightness,
            'sharpness': sharpness,
            'shear_x': shear_x,
            'shear_y': shear_y,
            'translate_x': translate_x,
            'translate_y': translate_y,
            # sample_pairing,
            'blur': blur,
            'invert': invert,
            'flip_lr': flip_lr,
            'flip_ud': flip_ud,
            'cutout': cutout,
            'crop_bilinear': crop_bilinear,
            'contour': contour,
            'detail': detail,
            'edge_enhance': edge_enhance,
            'sharpen': sharpen,
            'max_': max_,
            'min_': min_,
            'median': median,
            'gaussian': gaussian
        }
        ALL_TRANSFORMS = []
        ALL_TRANSFORMS += [
            custom_augmentation_space_augs_mapping[aug] for aug in custom_augmentation_space_augs
        ]
        print("CUSTOM Augs set to:", ALL_TRANSFORMS)
    elif 'stacked_TA_cifar' in augmentation_space:
        ALL_TRANSFORMS = [
            identity,
            auto_contrast,
            equalize,
            rotate, # extra coin-flip
            solarize,
            color, # enhancer
            posterize,
            contrast, # enhancer
            brightness, # enhancer
            sharpness, # enhancer
            shear_x, # extra coin-flip
            shear_y, # extra coin-flip
            translate_x, # extra coin-flip
            translate_y, # extra coin-flip
            flip_lr_stackedTA,
            cutout16,
            mean_pad4_randcrop,
        ]
    elif augmentation_space == 'Not_used':
        ALL_TRANSFORMS = [None]
    else:
        if 'standard' not in augmentation_space:
            raise ValueError(f"Unknown search space {augmentation_space}")
        ALL_TRANSFORMS = [
            identity,
            auto_contrast,
            equalize,
            rotate, # extra coin-flip
            solarize,
            color, # enhancer
            posterize,
            contrast, # enhancer
            brightness, # enhancer
            sharpness, # enhancer
            shear_x, # extra coin-flip
            shear_y, # extra coin-flip
            translate_x, # extra coin-flip
            translate_y # extra coin-flip
        ]
set_augmentation_space('fixed_standard', 31)
def apply_augmentation(aug_idx, m, img):
    """Apply transform `aug_idx` from ALL_TRANSFORMS to `img` at magnitude `m` with probability 1."""
    return ALL_TRANSFORMS[aug_idx].pil_transformer(1., m)(img)
def num_augmentations():
    """Return the size of the currently configured augmentation space."""
    return len(ALL_TRANSFORMS)
class TrivialAugment:
    """Apply one uniformly random transform at a uniformly random strength."""

    def __call__(self, img):
        transform = random.choices(ALL_TRANSFORMS, k=1)[0]
        strength = random.randint(0, PARAMETER_MAX)
        return transform.pil_transformer(1., strength)(img)
class RandAugment:
    """Apply `n` randomly chosen transforms, each at the fixed magnitude `m`."""

    def __init__(self, n, m):
        self.n = n
        self.m = m  # [0, 30]

    def __call__(self, img):
        for op in random.choices(ALL_TRANSFORMS, k=self.n):
            img = op.pil_transformer(1., self.m)(img)
        return img
class UniAugment:
    """Draw two random transforms; each is applied with probability 0.5 at a random strength."""

    def __call__(self, img):
        for op in random.choices(ALL_TRANSFORMS, k=2):
            strength = random.randint(0, PARAMETER_MAX)
            img = op.pil_transformer(0.5, strength)(img)
        return img
class UniAugmentWeighted:
    """Apply k random transforms, where k is drawn from a caller-supplied distribution."""

    def __init__(self, n, probs):
        self.n = n
        self.probs = probs  # [prob of zero augs, prob of one aug, ..]

    def __call__(self, img):
        count = random.choices(range(len(self.probs)), self.probs)[0]
        for op in random.choices(ALL_TRANSFORMS, k=count):
            strength = random.randint(0, PARAMETER_MAX)
            img = op.pil_transformer(1., strength)(img)
        return img
| 23,352
| 29.210867
| 129
|
py
|
DeepAA
|
DeepAA-master/__init__.py
| 0
| 0
| 0
|
py
|
|
DeepAA
|
DeepAA-master/DeepAA_search.py
|
# Candidate micro-batch sizes for the per-GPU gradient loops (selected per model below).
_PARALLEL_BATCH_small, _PARALLEL_BATCH_median, _PARALLEL_BATCH_large = 16, 128, 256 # 64
import os
import sys
import numpy as np
import tensorflow as tf
# Let TF pick the inter-op thread count; enable on-demand GPU memory growth.
tf.config.threading.set_inter_op_parallelism_threads(0)
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
import multiprocessing
import argparse
from augmentation import get_mid_magnitude
from DeepAA_utils import test_loss, test_accuracy, train_loss, train_accuracy
from DeepAA_utils import get_model, get_dataset, get_augmentation, get_loss_fun, get_optim_net, get_optim_policy
from DeepAA_utils import get_lops_luniq, get_policy, get_img_size
from DeepAA_utils import PrefetchGenerator, save_policy
from tensorflow.keras.utils import Progbar
import matplotlib
matplotlib.use('Agg')  # headless backend: plots are written to files, no display needed
from utils import Logger as myLogger
from utils import repeat
# Command-line configuration for the policy search.
parser = argparse.ArgumentParser()
# pretrain
parser.add_argument('--use_model', default='WRN_28_10', type=str, help='Model used for search')
parser.add_argument('--dataset', default='cifar10', type=str, help='Dataset, e.g., cifar10, imagenet')
parser.add_argument('--n_classes', default=100, type=int, help='Number of classes')
parser.add_argument('--nb_epochs', default=45, type=int, help='Number of epochs for pretrain')
parser.add_argument('--pretrain_size', default=5000, type=int, help='Number of images for pretraining')
parser.add_argument('--l_mags', default=13, type=int, help='Number of magnitudes, should be an odd number')
parser.add_argument('--policy_lr', default=0.025, type=float, help='Policy learning rate')
parser.add_argument('--pretrain_lr', default=0.1, type=float, help='maximum learning rate')
parser.add_argument('--batch_size', default=128, type=int, help='Training batch size')
parser.add_argument('--val_batch_size', default=1024, type=int, help='Validation batch size')
parser.add_argument('--test_batch_size', default=512, type=int, help='Testing batch size')
parser.add_argument('--clip_policy_gradient_norm', default=5.0, type=float, help='clipping the policy gradient by norm')
parser.add_argument('--debug', default=False, action='store_true', help='Debugging')
parser.add_argument('--seed', default=1, type=int, help='Random seed')
parser.add_argument('--policy_bn_training', default=False, action='store_true', help='use batchnorm for policy search, Default to False')
parser.add_argument('--n_policies', default=4, type=int, help='Number of policies')
parser.add_argument('--search_bno', default=256, type=int, help='Search steps for each policy')
parser.add_argument('--repeat_random_ops', default=False, action='store_true', help='repeat random operations (randCrop, randFlip, randCutout')
parser.add_argument('--N_repeat_random', default=1, type=int, help='Number to repeats')
parser.add_argument('--use_pool', default=False, action='store_true', help='Using multiprocessing for augmentation')
parser.add_argument('--chunk_size', default=None, type=int, help='Chunk size for augmentation')
parser.add_argument('--EXP_gT_factor', default=4, type=int, help='Expansion factor for calculating gradient')
parser.add_argument('--EXP_G', default=16, type=int, help='Expansion for Jacobian vector product')
parser.add_argument('--train_same_labels', default=16, type=int, help='Sample data from N randomly selected labels')
parser.add_argument('--mode', default='client', type=str, help='Dummy params')
parser.add_argument('--port', default=38277, type=int, help='Dummy params')
args=parser.parse_args()
# Pick the per-GPU micro-batch according to the model's memory footprint.
if args.use_model in ['resnet50']:
    _PARALLEL_BATCH = _PARALLEL_BATCH_small
elif args.use_model in ['WRN_28_10']:
    _PARALLEL_BATCH = _PARALLEL_BATCH_median
elif args.use_model in ['WRN_40_2']:
    _PARALLEL_BATCH = _PARALLEL_BATCH_large
else:
    raise Exception('Unrecognized model {}'.format(args.use_model))
n_cpus = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=n_cpus) if args.use_pool else None
np.random.seed(int(args.seed))
tf.random.set_seed(int(args.seed))
# Augmentation-space geometry and datasets.
ops_mid_magnitude = get_mid_magnitude(args.l_mags)
args.l_ops, args.l_uniq = get_lops_luniq(args, ops_mid_magnitude)
args.img_size = get_img_size(args)
train_ds, val_ds, test_ds, search_ds = get_dataset(args)
nb_train_steps = len(train_ds)
augmentation_default, augmentation_search, augmentation_test = get_augmentation(args)
_, test_loss_fun, val_loss_fun = get_loss_fun()
# Model/optimizer are built under the multi-GPU distribution scope.
# NOTE(review): indentation was lost in this copy — the exact extent of the
# `with` block (whether optim_net is created inside the scope) should be
# confirmed against the upstream file.
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
    model = get_model(args, args.use_model, args.n_classes)
    checkpoint = tf.train.Checkpoint(model=model)
    train_loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,
                                                                   reduction=tf.keras.losses.Reduction.NONE)
    optim_net = get_optim_net(args, nb_train_steps)
assert args.train_same_labels % mirrored_strategy.num_replicas_in_sync == 0, "Make sure val_same_labels can be divided by num_replicas_in_sync"
available_policies = np.arange(args.l_uniq, dtype=np.int32)[:, np.newaxis]
print(available_policies)
# One (policy, optimizer) pair per searched sub-policy.
all_using_policies, all_using_optim_policies = [], []
for k in range(args.n_policies):
    policy_train_ = get_policy(args, op_names=augmentation_search.op_names, ops_mid_magnitude=ops_mid_magnitude, available_policies= available_policies)
    optim_policy_ = get_optim_policy(args.policy_lr)
    all_using_policies.append(policy_train_)
    all_using_optim_policies.append(optim_policy_)
train_ds.on_epoch_end()
train_ds_iter = iter(train_ds)
def get_pretrain_data():
    """Fetch the next (images, labels) training batch and default-augment it.

    Restarts the epoch iterator when it is exhausted. Returns the batch as
    (float32 image tensor, int32 label tensor).
    """
    global train_ds_iter
    try:
        images, labels = next(train_ds_iter)
    # NOTE(review): bare `except` also hides non-StopIteration errors — consider narrowing.
    except:
        train_ds.on_epoch_end()
        train_ds_iter = iter(train_ds)
        images, labels = next(train_ds_iter)
    bs = len(labels)
    images, _ = augmentation_default(images, labels,
                                     [None]*bs, [None]*bs,
                                     use_post_aug=True, pool=pool)
    return tf.convert_to_tensor(images, dtype=tf.float32), tf.convert_to_tensor(labels, tf.int32)
@tf.function(
    input_signature=[tf.TensorSpec(shape=(None, *args.img_size), dtype=tf.float32),
                     tf.TensorSpec(shape=(None, ), dtype=tf.int32),
                     tf.TensorSpec(shape=(), dtype=tf.float32)],
)
def train_step(images_aug, labels, clip_gradient_norm):
    """One SGD step on an augmented batch; returns (mean loss, logits)."""
    bs = len(images_aug)  # NOTE(review): unused
    with tf.GradientTape() as tape:
        labels_aug_pred = model(images_aug, training=True)
        loss_aug = tf.reduce_mean(train_loss_fun(labels, labels_aug_pred))
        # Add the model's regularization losses.
        loss_aug += sum(model.losses)
    grad_net = tape.gradient(loss_aug, model.trainable_variables)
    if clip_gradient_norm > 0:
        grad_net, _ = tf.clip_by_global_norm(grad_net, clip_norm=clip_gradient_norm)
    optim_net.apply_gradients(zip(grad_net, model.trainable_variables))
    del tape
    return loss_aug, labels_aug_pred
def pretrain():
    """Pretrain the network for args.nb_epochs epochs on default-augmented data.

    Each step fetches a batch via get_pretrain_data(), applies one SGD update
    with gradient norm clipped at 5.0, and updates the running loss/accuracy
    metrics for logging.
    """
    for epoch in range(args.nb_epochs):
        # (Removed an unreachable `if epoch == args.nb_epochs + 1: break`
        # guard: `epoch` is always strictly less than args.nb_epochs here.)
        pbar = Progbar(target=nb_train_steps, interval=0.05, width=30)
        print('\n Pretrain Epoch {} \n'.format(epoch))
        for bno in range(nb_train_steps):
            images, labels = get_pretrain_data()
            loss, labels_pred = train_step(images, labels, clip_gradient_norm=5.)
            train_loss(loss)  # only record the last method's loss and accuracy
            train_accuracy(labels, labels_pred)
            pbar.update(bno + 1)
# NOTE(review): indentation was lost in this copy; the `epoch-1` reference
# suggests these save/load lines originally sat at the end of pretrain() —
# confirm against the upstream file before relying on module-level execution.
print('Saving the checkpoint to {}'.format('./results/images/ckpt{}/model_ckpt{}'.format(os.environ['CUDA_VISIBLE_DEVICES'], epoch-1)))
# FixMe: We need to save and then load the pretrain model, otherwise the pretrained model won't be synchronized across all GPUs
model.save_weights('./results/images/ckpt{}/model_ckpt{}'.format(os.environ['CUDA_VISIBLE_DEVICES'], args.nb_epochs))
model.load_weights('./results/images/ckpt{}/model_ckpt{}'.format(os.environ['CUDA_VISIBLE_DEVICES'], args.nb_epochs))
# TensorBoard writers and output folders, namespaced by the visible GPU id.
search_summary_writer = tf.summary.create_file_writer('./results/images/logs/cuda{}/search'.format(os.environ['CUDA_VISIBLE_DEVICES']))
graph_summary_writer = tf.summary.create_file_writer('./results/images/logs/cuda{}/graph'.format(os.environ['CUDA_VISIBLE_DEVICES']))
save_folder = './results/images/cuda{}'.format(os.environ['CUDA_VISIBLE_DEVICES'])
save_folder_ckpt = './results/images/ckpt{}'.format(os.environ['CUDA_VISIBLE_DEVICES'])
if not os.path.isdir(save_folder):
    os.mkdir(save_folder)
if not os.path.isdir(save_folder_ckpt):
    os.mkdir(save_folder_ckpt)
if __name__ == '__main__':
    # Mirror stdout to a per-GPU log file.
    sys.stdout = myLogger('./results/images/cuda{}/stdout'.format(os.environ['CUDA_VISIBLE_DEVICES']))
    # pretraining
    if 'imagenet' in args.dataset:
        # ImageNet uses a pre-trained ResNet-50 checkpoint instead of pretraining here.
        checkpoint.restore('./pretrained_imagenet/imagenet_resnet50_ckpt')
    else:
        pretrain()
    # disable batch normalization updating
    for layer in model.layers:
        if isinstance(layer, tf.keras.layers.experimental.SyncBatchNormalization) or isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = False
# Zero-filled template with the structure of the model gradients (used for tf.function signatures below).
gradients_like = tf.nest.map_structure(lambda g: tf.zeros_like(g), model.trainable_variables)
@tf.function(
    input_signature=[tf.TensorSpec(shape=(None, *args.img_size), dtype=tf.float32),
                     tf.TensorSpec(shape=(None, ), dtype=tf.int32),
                     tf.TensorSpec(shape=(), dtype=tf.float32),
                     tf.TensorSpec(shape=(None, ), dtype=tf.float32)]
)
def step2_cal_JVP_vStep(images_aug2, labels, weight_1, weights_2):
    """Backward pass: per-parameter gradient of the weighted per-sample losses.

    `weights_2 * weight_1` is supplied as output_gradients, i.e. each sample's
    loss is weighted before accumulation into the parameter gradients.
    """
    if not args.debug:
        # NOTE(review): prints when debug is OFF — confirm the condition is intended.
        print('*'*40 + ' retracing step2_cal_JVP_vStep ' + '*'*40)
    with tf.GradientTape() as tape:
        labels_aug_pred = model(images_aug2, training=False)
        loss_aug = train_loss_fun(labels, labels_aug_pred)
    grad_new = tape.gradient(loss_aug, model.trainable_variables, output_gradients = weights_2 * weight_1)
    del tape
    return grad_new
@tf.function(
    input_signature=[tf.TensorSpec(shape=(None, *args.img_size), dtype=tf.float32),
                     tf.TensorSpec(shape=(None,), dtype=tf.int32),
                     tf.TensorSpec(shape=(), dtype=tf.float32), tf.TensorSpec(shape=(), dtype=tf.float32),
                     [tf.TensorSpec.from_tensor(v) for v in tf.nest.flatten(gradients_like)]]
)
def step2_cal_JVP_jvpStep(images_aug2, labels, g_norm_train, g_norm_val, tangents):
    """Forward-mode pass: JVP of per-sample losses along `tangents`.

    The result is normalized by the product of the train/val gradient norms.
    """
    if not args.debug:
        # NOTE(review): prints when debug is OFF — confirm the condition is intended.
        print('*'*40 + ' retracing step2_cal_JVP_jStep ' + '*'*40)
    with tf.autodiff.ForwardAccumulator(primals=model.trainable_variables, tangents=tangents) as acc:
        labels_aug_pred = model(images_aug2, training=False)
        loss_aug = train_loss_fun(labels, labels_aug_pred)
    grad_importance_new = acc.jvp(loss_aug) / (g_norm_train * g_norm_val)
    del acc
    return grad_importance_new
@tf.function
def policy_gradient_stage1(reduce_random_mat,
                           images_aug, labels_aug,
                           images_val, labels_val,
                           weight_1, weights_2):
    """Stage-1 policy gradient: cosine similarity between augmented-train and
    validation gradients, plus a per-sample importance score via a JVP.

    Returns:
        (cos_sim, grad_importance): scalar gradient cosine similarity and a
        per-augmented-sample importance vector (collapsed over repeated random
        ops when args.repeat_random_ops is set).
    """
    reduce_random_mat = tf.squeeze(reduce_random_mat)
    images_aug = tf.squeeze(images_aug)
    labels_aug = tf.squeeze(labels_aug)
    images_val = tf.squeeze(images_val)
    labels_val = tf.squeeze(labels_val)
    weight_1 = tf.squeeze(weight_1)
    weights_2 = tf.squeeze(weights_2)
    bs = _PARALLEL_BATCH
    val_bs = tf.shape(images_val)[0]
    mult = tf.cast(val_bs, dtype=tf.float32)
    def batching(L, bs, k): # Get Batch Range
        start = k * bs
        if start + bs > L:
            end = L
        else:
            end = start + bs
        return start, end
    # 1) Step1: Get gradients of augmented and clean data
    def one_batch_grad(imgs, labs, w1, w2, grad):
        grad_new = step2_cal_JVP_vStep(imgs, labs, w1, w2)
        grad = tf.nest.map_structure(lambda g1, g2: g1+g2, grad, grad_new)
        return grad
    @tf.function
    def cal_grad(imgs, labs, w1, w2):
        # Accumulate the weighted gradient over micro-batches of size `bs`.
        L = tf.shape(imgs)[0]
        grad0 = tf.nest.map_structure(lambda g: tf.zeros_like(g), model.trainable_variables)
        grad, _ = tf.while_loop(
            cond = lambda grad_acc, k: tf.cast(k, dtype=tf.int32) < tf.cast(tf.math.ceil(tf.cast(L, dtype=tf.float32)/tf.cast(bs, dtype=tf.float32)), dtype=tf.int32),
            body = lambda grad_acc, k: (one_batch_grad(imgs[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       labs[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       w1,
                                                       w2[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       grad_acc), k+1),
            loop_vars = (grad0, tf.constant(0)),
            back_prop = False,
            parallel_iterations = 1,
        )
        return grad
    grad_val = cal_grad(images_val, labels_val, tf.constant(1.0, dtype=tf.float32), tf.ones(val_bs, dtype=tf.float32)/tf.cast(val_bs, dtype=tf.float32))
    grad_train = cal_grad(images_aug, labels_aug, weight_1 * mult, weights_2)
    grad_train = tf.nest.map_structure(lambda g: g/mult, grad_train) # for numerical stability
    # 2) compute tangents
    g_norm_val = tf.linalg.global_norm(grad_val)
    g_norm_train = tf.linalg.global_norm(grad_train)
    gradV_gradT = sum([tf.reduce_sum(g1*g2) for g1, g2 in zip(grad_val, grad_train)])
    gradV_gradT_gradTrainNorm2 = gradV_gradT/(g_norm_train**2)
    # Tangent = validation gradient with its component along the train gradient removed.
    tangents = tf.nest.map_structure(lambda g1, g2: g1 - g2 * gradV_gradT_gradTrainNorm2, grad_val, grad_train)
    # 3) compute JVP
    def one_step_JVP(grad_importance_array, imgs, labs, k):
        grad_importance_ = tf.stop_gradient(
            step2_cal_JVP_jvpStep(imgs, labs, g_norm_train, g_norm_val, tangents)
        )
        grad_importance_array = grad_importance_array.write(tf.cast(k, dtype=tf.int32), grad_importance_)
        return grad_importance_array
    @tf.function
    def run_JVP(imgs, labs):
        L = tf.shape(imgs)[0]
        grad_importance_array = tf.TensorArray(tf.float32, size=0, dynamic_size=True, infer_shape=False, element_shape=[None])
        grad_importance_array, _ = tf.while_loop(
            cond = lambda grad_TA, k: tf.cast(k, dtype=tf.int32) < tf.cast(tf.math.ceil(tf.cast(L, dtype=tf.float32)/tf.cast(bs, dtype=tf.float32)), dtype=tf.int32),
            body = lambda grad_TA, k: (one_step_JVP(grad_TA,
                                                    imgs[batching(L,bs,k)[0]:batching(L,bs,k)[1]],
                                                    labs[batching(L,bs,k)[0]:batching(L,bs,k)[1]],
                                                    k), k+1),
            loop_vars = (grad_importance_array, tf.constant(0)),
            back_prop = False,
            parallel_iterations = 1,
        )
        return grad_importance_array.concat()
    grad_importance = run_JVP(images_aug, labels_aug)
    if args.repeat_random_ops:
        # Average scores of repeated random ops back onto the unique-op axis.
        grad_importance = tf.matmul(grad_importance[tf.newaxis], reduce_random_mat, transpose_b=True)[0]
    # 4) compute cosine similarity
    cos_sim = gradV_gradT / (g_norm_train * g_norm_val)
    return cos_sim, grad_importance
@tf.function()
def policy_gradient_stage2(reduce_random_mat, images_aug_s, labels_aug_s, images_aug2, labels, images_val, labels_val, weights_gT, weights_G):
    """Per-replica policy-gradient step for search stages >= 2.

    Computes (a) the cosine similarity between the validation gradient and the
    training gradient of sampled-augmentation data, and (b) a per-operation
    importance score via a Jacobian-vector product (JVP), batched in chunks of
    `_PARALLEL_BATCH` to bound memory.

    NOTE(review): `weights_G` is accepted but never read in this body — verify
    against the distributed caller whether it is intentionally unused here.
    Relies on module-level `model`, `args`, `_PARALLEL_BATCH`,
    `step2_cal_JVP_vStep` and `step2_cal_JVP_jvpStep`.
    """
    # Inputs arrive with a leading per-replica axis; squeeze it away.
    reduce_random_mat = tf.squeeze(reduce_random_mat)
    images_aug_s = tf.squeeze(images_aug_s)
    labels_aug_s = tf.squeeze(labels_aug_s)
    images_val = tf.squeeze(images_val)
    labels_val = tf.squeeze(labels_val)
    weights_gT = tf.squeeze(weights_gT)
    bs = _PARALLEL_BATCH
    val_bs = tf.shape(images_val)[0]
    mult = 1.0
    def batching(L, bs, k): # Get Batch Range
        # Half-open [start, end) range of chunk k, clipped at L.
        start = k * bs
        if start + bs > L:
            end = L
        else:
            end = start + bs
        return start, end
    # 1) Step1: Get gradients of augmented and clean data
    def one_batch_grad(imgs, labs, w1, w2, grad):
        # Accumulate one chunk's gradient into the running structure `grad`.
        grad_new = step2_cal_JVP_vStep(imgs, labs, w1, w2)
        grad = tf.nest.map_structure(lambda g1, g2: g1+g2, grad, grad_new)
        return grad
    @tf.function
    def cal_grad(imgs, labs, w1, w2):
        # Chunked gradient accumulation over all rows of `imgs`.
        L = tf.shape(imgs)[0]
        grad0 = tf.nest.map_structure(lambda g: tf.zeros_like(g), model.trainable_variables)
        grad, _ = tf.while_loop(
            cond = lambda grad_acc, k: tf.cast(k, dtype=tf.int32) < tf.cast(tf.math.ceil(tf.cast(L, dtype=tf.float32)/tf.cast(bs, dtype=tf.float32)), dtype=tf.int32),
            body = lambda grad_acc, k: (one_batch_grad(imgs[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       labs[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       w1,
                                                       w2[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       grad_acc), k+1),
            loop_vars = (grad0, tf.constant(0)),
            back_prop = False,
            parallel_iterations = 1,
        )
        return grad
    grad_val = cal_grad(images_val, labels_val, tf.constant(1.0, dtype=tf.float32), tf.ones(val_bs, dtype=tf.float32)/tf.cast(val_bs, dtype=tf.float32))
    grad_train = cal_grad(images_aug_s, labels_aug_s, tf.constant(mult, dtype=tf.float32), weights_gT)
    grad_train = tf.nest.map_structure(lambda g: g/mult, grad_train) # for numerical stability
    # 2) compute tangents
    g_norm_val = tf.linalg.global_norm(grad_val)
    g_norm_train = tf.linalg.global_norm(grad_train)
    gradV_gradT = sum([tf.reduce_sum(g1*g2) for g1, g2 in zip(grad_val, grad_train)])
    gradV_gradT_gradTrainNorm2 = gradV_gradT/(g_norm_train**2)
    # Tangent = validation gradient minus its projection onto the training gradient.
    tangents = tf.nest.map_structure(lambda g1, g2: g1 - g2 * gradV_gradT_gradTrainNorm2, grad_val, grad_train)
    # 3) compute JVP
    def one_step_JVP(grad_importance_array, imgs, labs, k):
        # Write chunk k's JVP scores into slot k of the TensorArray.
        grad_importance_ = tf.stop_gradient(
            step2_cal_JVP_jvpStep(imgs, labs, g_norm_train, g_norm_val, tangents)
        )
        grad_importance_array = grad_importance_array.write(tf.cast(k, dtype=tf.int32), grad_importance_)
        return grad_importance_array
    @tf.function
    def run_JVP(imgs, labs):
        # Chunked JVP over all rows of `imgs`; returns a flat float32 vector.
        L = tf.shape(imgs)[0]
        grad_importance_array = tf.TensorArray(tf.float32, size=0, dynamic_size=True, infer_shape=False, element_shape=[None])
        grad_importance_array, _ = tf.while_loop(
            cond = lambda grad_TA, k: tf.cast(k, dtype=tf.int32) < tf.cast(tf.math.ceil(tf.cast(L, dtype=tf.float32)/tf.cast(bs, dtype=tf.float32)), dtype=tf.int32),
            body = lambda grad_TA, k: (one_step_JVP(grad_TA,
                                                    imgs[batching(L,bs,k)[0]:batching(L,bs,k)[1]],
                                                    labs[batching(L,bs,k)[0]:batching(L,bs,k)[1]],
                                                    k), k+1),
            loop_vars = (grad_importance_array, tf.constant(0)),
            back_prop = False,
            parallel_iterations = 1,
        )
        return grad_importance_array.concat()
    # Flatten [aug_n, l_seq, ...] -> [aug_n * l_seq, ...] for chunked JVP, then restore.
    aug_n, l_seq, w, h, c = images_aug2.shape
    images_aug2_ = tf.reshape(images_aug2, [aug_n * l_seq, w, h, c])
    labels_ = tf.reshape(labels, [aug_n * l_seq])
    grad_importance = run_JVP(images_aug2_, labels_)
    grad_importance = tf.reshape(grad_importance, [aug_n, l_seq])
    if args.repeat_random_ops:
        # Fold scores of repeated random ops back to unique ops.
        grad_importance = tf.matmul(grad_importance, reduce_random_mat, transpose_b=True)
    # 4) compute cosine similarity
    cos_sim = gradV_gradT / (g_norm_train * g_norm_val)
    return cos_sim, grad_importance
@tf.function
def distributed_train_stage1(dist_inputs):
    """Run `policy_gradient_stage1` on every replica and return the per-replica
    (cos_sim, grad_importance) results as local tuples."""
    per_replica_cos_sim, per_replica_grad_importance = mirrored_strategy.run(policy_gradient_stage1, args=(*dist_inputs,))
    return mirrored_strategy.experimental_local_results(per_replica_cos_sim), mirrored_strategy.experimental_local_results(per_replica_grad_importance)
@tf.function
def distributed_train_stage2(dist_inputs):
    """Run `policy_gradient_stage2` on every replica and return the per-replica
    (cos_sim, grad_importance) results as local tuples."""
    per_replica_cos_sim, per_replica_grad_importance = mirrored_strategy.run(policy_gradient_stage2, args=(*dist_inputs,))
    return mirrored_strategy.experimental_local_results(per_replica_cos_sim), mirrored_strategy.experimental_local_results(per_replica_grad_importance)
def train_policy_stage1(stage, images_val_, labels_val_, images_batch, labels_batch):
    """One optimization step for the first policy stage.

    Densely enumerates every (op, magnitude) of the stage's policy on each
    search image, scores them on the replicas via `distributed_train_stage1`,
    and ascends the expected importance under the policy's softmax.
    Relies on module-level `args`, `pool`, `augmentation_test`,
    `augmentation_search`, `all_using_policies`, `all_using_optim_policies`,
    `mirrored_strategy` and `repeat`.
    """
    search_bs = len(images_val_)
    val_bs = len(images_val_[0])
    assert search_bs == len(images_batch), 'Check dimensions'
    assert len(images_val_) % search_bs == 0, 'Use different validation batch for different search data point'
    EXP = 1 # expansion factor
    # Validation images pass through the identity op (op 0, magnitude 0) so they
    # receive the same post-aug pipeline as search data.
    images_val_, labels_val_ = augmentation_test(sum(images_val_, []), np.concatenate(labels_val_),
                                                 np.array([[0]]*search_bs*val_bs, dtype=np.int32),
                                                 np.array([[0]]*search_bs*val_bs, dtype=np.float32) / float(args.l_mags - 1),
                                                 use_post_aug=True, pool=pool, chunksize=args.chunk_size)
    images_val_ = np.reshape(images_val_, [search_bs, val_bs, *args.img_size])
    labels_val_ = np.reshape(labels_val_, [search_bs, val_bs])
    images_batch = repeat(images_batch, EXP, axis=0)
    labels_batch = repeat(labels_batch, EXP, axis=0)
    ops_dense, mags_dense, reduce_random_mat, ops_mags_idx, probs, probs_exp = all_using_policies[stage-1].get_dense_aug(None, args.repeat_random_ops)
    # NOTE(review): when images_batch[0] is not a list, images_aug_last is never
    # assigned and the reshape below raises NameError — the list-of-lists input
    # form appears to be the only supported one; confirm with the data pipeline.
    if isinstance(images_batch[0], list):
        # Apply every dense (op, magnitude) pair to every search image.
        images_aug_last, labels_aug_last = augmentation_search(repeat(sum(images_batch,[]), len(ops_dense), axis=0), repeat(np.concatenate(labels_batch), len(ops_dense), axis=0),
                                                               np.tile(ops_dense, [search_bs * EXP, 1]), np.tile(mags_dense, [search_bs * EXP, 1]).astype(np.float32)/float(args.l_mags-1),
                                                               use_post_aug=False, pool=pool,
                                                               chunksize=None)
    images_aug_last = np.reshape(images_aug_last, [-1, len(ops_dense), *args.img_size])
    labels_aug_last = np.reshape(labels_aug_last, [-1, len(ops_dense)])
    weights_1 = np.ones(search_bs*EXP, dtype=np.float32)
    weights_2 = probs_exp
    assert search_bs % mirrored_strategy.num_replicas_in_sync == 0, 'Make sure that search_bs is multiples of mirrored_trategy'
    all_local_cos_sim, all_local_grad_importance = [], []
    # Distribute one search point per replica; each replica scores its own point.
    for used_batch in range(0, search_bs, mirrored_strategy.num_replicas_in_sync):
        get_value_fn = lambda ctx: (
            tf.constant(reduce_random_mat, dtype=tf.float32),
            tf.convert_to_tensor(images_aug_last[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_aug_last[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(images_val_[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_val_[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(weights_1[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.constant(weights_2, dtype=tf.float32),
        )
        dist_values = mirrored_strategy.experimental_distribute_values_from_function(get_value_fn)
        all_local_cos_sim_, all_local_grad_importance_ = distributed_train_stage1(dist_values)
        all_local_cos_sim.extend(all_local_cos_sim_)
        all_local_grad_importance.extend(all_local_grad_importance_)
    # Average importance scores over all search points.
    grad_importance = tf.stack(all_local_grad_importance, axis=0)
    grad_importance = tf.reduce_mean(grad_importance, axis=0)
    mult_factor = 0.25
    with tf.GradientTape() as tape:
        probs = tf.nn.softmax(all_using_policies[stage-1].logits)
        # Negative expected importance: gradient ascent on importance-weighted probs.
        loss_policy_final = -tf.reduce_sum(grad_importance * probs) * mult_factor
    grad_policy = tape.gradient(loss_policy_final, all_using_policies[stage-1].trainable_variables)
    all_using_optim_policies[stage-1].apply_gradients(zip(grad_policy, all_using_policies[stage-1].trainable_variables))
    del tape
def train_policy_stage2(stage, images_val_, labels_val_, images_batch, labels_batch):
    """One optimization step for policy stages >= 2.

    Samples augmentations from all earlier stages (to reach the current depth),
    densely enumerates the current stage's (op, magnitude) pairs on top, scores
    them on the replicas via `distributed_train_stage2`, and updates the current
    stage's logits. Uses the same module-level globals as `train_policy_stage1`.
    """
    assert stage >= 2, 'depth starts from 2'
    search_bs = len(images_val_)
    val_bs = len(images_val_[0])
    assert search_bs == len(images_batch), 'Check dimension'
    assert len(images_val_) % search_bs == 0, 'Use different validation batch for different search data point'
    # Identity-augment validation data so it shares the post-aug pipeline.
    images_val_, labels_val_ = augmentation_test(sum(images_val_, []), np.concatenate(labels_val_),
                                                 np.array([[0]]*search_bs*val_bs, dtype=np.int32),
                                                 np.array([[0]]*search_bs*val_bs, dtype=np.float32) / float(args.l_mags - 1),
                                                 use_post_aug=True, pool=pool, chunksize=args.chunk_size)
    images_val_ = np.reshape(images_val_, [search_bs, val_bs, *args.img_size])
    labels_val_ = np.reshape(labels_val_, [search_bs, val_bs])
    EXP_gT = args.l_uniq * args.EXP_gT_factor # Expansion for calculating gradients
    EXP_G = args.EXP_G # Expansion for calculating JVP
    images_batch_EXPgT = repeat(images_batch, EXP_gT, axis=0)
    labels_batch_EXPgT = repeat(labels_batch, EXP_gT, axis=0)
    images_batch_EXPG = repeat(images_batch, EXP_G, axis=0)
    labels_batch_EXPG = repeat(labels_batch, EXP_G, axis=0)
    # --- Branch A: sample ops from stages 1..stage for the gradient estimate ---
    images_aug_s, labels_aug_s = images_batch_EXPgT, labels_batch_EXPgT
    ops_s, mags_s = [], []
    for k_stage in range(1, stage+1):
        dummy_images = [None] * search_bs * EXP_gT
        assert search_bs * EXP_gT == len(images_aug_s)
        assert len(images_aug_s[0]) == 1
        ops_s_, mags_s_, ops_mags_idx_s, probs_sample = all_using_policies[k_stage-1].sample(dummy_images, dummy_images, None, augNum=1)
        ops_s.append(ops_s_)
        mags_s.append(mags_s_)
    ops_s = np.concatenate(ops_s, axis=1)
    mags_s = np.concatenate(mags_s, axis=1)
    images_aug_s, labels_aug_s = augmentation_search(sum(images_aug_s, []), np.concatenate(labels_aug_s, axis=0),
                                                    ops_s, mags_s.astype(np.float32)/float(args.l_mags-1),
                                                    use_post_aug=False, pool=pool,
                                                    chunksize=None)
    images_aug_s = np.reshape(images_aug_s, [search_bs, EXP_gT, *args.img_size])
    labels_aug_s = np.reshape(labels_aug_s, [search_bs, EXP_gT])
    # --- Branch B: sample ops from stages 1..stage-1, then densely enumerate
    # the current stage's ops for the JVP importance estimate ---
    images_aug_k, labels_aug_k = images_batch_EXPG, labels_batch_EXPG
    ops_k, mags_k = [], []
    for k_stage in range(1, stage):
        dummy_images = [None] * search_bs * EXP_G
        assert search_bs * EXP_G == len(images_aug_k)
        assert len(images_aug_k[0]) == 1
        ops_k_, mags_k_, ops_mags_idx_k, probs_sample = all_using_policies[k_stage-1].sample(dummy_images, dummy_images, None, augNum=1)
        ops_k.append(ops_k_)
        mags_k.append(mags_k_)
    ops_k = np.concatenate(ops_k, axis=1)
    mags_k = np.concatenate(mags_k, axis=1)
    # aug_finish=False: keep intermediate images for the dense pass below.
    images_aug_k, labels_aug_k = augmentation_search(sum(images_aug_k, []), np.concatenate(labels_aug_k, axis=0),
                                                    ops_k, mags_k.astype(np.float32)/float(args.l_mags-1),
                                                    use_post_aug=False, pool=pool, aug_finish=False, chunksize=args.chunk_size)
    ops_dense, mags_dense, reduce_random_mat, ops_mags_idx, probs, probs_exp = all_using_policies[stage-1].get_dense_aug(None, repeat_random_ops=args.repeat_random_ops)
    images_aug_k, labels_aug_k = augmentation_search(repeat(images_aug_k, len(ops_dense), axis=0), np.repeat(labels_aug_k, len(ops_dense), axis=0),
                                                    np.tile(ops_dense, [search_bs * EXP_G, 1]), np.tile(mags_dense, [search_bs * EXP_G, 1]).astype(np.float32)/float(args.l_mags-1),
                                                    use_post_aug=False, pool=pool,
                                                    chunksize=None)
    images_aug_k = np.reshape(images_aug_k, [search_bs, EXP_G, len(ops_dense), *args.img_size])
    labels_aug_k = np.reshape(labels_aug_k, [search_bs, EXP_G, len(ops_dense)])
    weights_gT = np.ones(EXP_gT, dtype=np.float32) / float(EXP_gT)
    weights_G = np.ones(EXP_G, dtype=np.float32) / float(EXP_G)
    assert search_bs % mirrored_strategy.num_replicas_in_sync == 0, 'Make sure that search_bs is multiples of mirrored_trategy'
    all_local_cos_sim, all_local_grad_importance = [], []
    # One search point per replica per round.
    for used_batch in range(0, search_bs, mirrored_strategy.num_replicas_in_sync):
        get_value_fn = lambda ctx: (
            tf.convert_to_tensor(reduce_random_mat, dtype=tf.float32),
            tf.convert_to_tensor(images_aug_s[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_aug_s[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(images_aug_k[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_aug_k[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(images_val_[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_val_[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(weights_gT, dtype=tf.float32),
            tf.convert_to_tensor(weights_G, dtype=tf.float32),
        )
        dist_values = mirrored_strategy.experimental_distribute_values_from_function(get_value_fn)
        all_local_cos_sim_, all_local_grad_importance_ = distributed_train_stage2(dist_values)
        all_local_cos_sim.extend(all_local_cos_sim_)
        all_local_grad_importance.extend(all_local_grad_importance_)
    grad_importance = tf.stack(all_local_grad_importance, axis=0)
    # Average over the EXP_G sampling axis first.
    grad_importance = tf.reduce_mean(grad_importance, axis=1)
    assert grad_importance.shape == [search_bs, args.l_uniq], 'Check dimension'
    # Mean-minus-std across search points: a pessimistic (risk-averse) score.
    grad_importance = tf.reduce_mean(grad_importance.numpy(), axis=0) - tf.math.reduce_std(grad_importance.numpy(), axis=0)
    mult_factor = float(search_bs)
    with tf.GradientTape() as tape:
        probs = tf.nn.softmax(all_using_policies[stage - 1].logits)
        loss_policy_final = -tf.reduce_sum(grad_importance * probs) * mult_factor
    grad_policy = tape.gradient(loss_policy_final, all_using_policies[stage - 1].trainable_variables)
    all_using_optim_policies[stage - 1].apply_gradients(zip(grad_policy, all_using_policies[stage - 1].trainable_variables))
    del tape
def search_policy(search_bno, search_bs=16, val_bs=128):
    """Optimize every augmentation-policy stage in sequence.

    For each stage, runs `search_bno` search iterations; stage 1 uses the
    dense single-op trainer, deeper stages the sampled-prefix trainer.
    Relies on module-level `search_ds`, `val_ds`, `args`, `PrefetchGenerator`
    and `Progbar`.
    """
    prefetcher = PrefetchGenerator(search_ds, val_ds, args.n_classes, search_bs, val_bs)
    for stage in range(1, args.n_policies + 1):
        progress = Progbar(target=search_bno, interval=1, width=30)
        for step in range(search_bno):
            batch = prefetcher.next()
            # Stage 1 has no sampled prefix; all deeper stages share one trainer.
            trainer = train_policy_stage1 if stage == 1 else train_policy_stage2
            trainer(stage, *batch)
            progress.update(step + 1)
if __name__ == '__main__':
    # Run the full policy search, persist the learned policies, then shut down
    # the multiprocessing pool used by the augmentation workers.
    search_policy(search_bno=args.search_bno, search_bs=args.train_same_labels, val_bs=64)
    save_policy(args, all_using_policies, augmentation_search)
    pool.close()
    pool.join()
| 31,956
| 53.908935
| 187
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/lr_scheduler.py
|
import torch
from theconf import Config as C
def adjust_learning_rate_resnet(optimizer):
    """
    Sets the learning rate to the initial LR decayed by 10 on every predefined epochs
    Ref: AutoAugment
    """
    epoch = C.get()['epoch']
    # Milestone schedules keyed by total training length.
    schedules = {
        90: [30, 60, 80],
        180: [60, 120, 160],
        270: [90, 180, 240],
    }
    milestones = schedules.get(epoch)
    if milestones is None:
        raise ValueError('invalid epoch=%d for resnet scheduler' % epoch)
    return torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones)
| 645
| 31.3
| 85
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/augmentations.py
|
# code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import numpy as np
import torch
from DeepAA_evaluate import autoaugment, fast_autoaugment
import aug_lib
class Lighting(object):
    """Lighting noise(AlexNet - style PCA - based noise)"""

    def __init__(self, alphastd, eigval, eigvec):
        # Noise scale plus the PCA eigenvalues/eigenvectors of the channel stats.
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        # A zero noise scale makes this a no-op.
        if self.alphastd == 0:
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # Per-channel offset: sum over components of alpha_i * eigval_i * eigvec.
        offset = (self.eigvec.type_as(img).clone()
                  .mul(alpha.view(1, 3).expand(3, 3))
                  .mul(self.eigval.view(1, 3).expand(3, 3))
                  .sum(1).squeeze())
        return img.add(offset.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
    """
    Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
    """

    def __init__(self, length):
        # Side length (pixels) of the square region to zero out.
        self.length = length

    def __call__(self, img):
        # img is CHW; pick a random center and zero a clipped square around it.
        height, width = img.size(1), img.size(2)
        cy = np.random.randint(height)
        cx = np.random.randint(width)
        half = self.length // 2
        top = np.clip(cy - half, 0, height)
        bottom = np.clip(cy + half, 0, height)
        left = np.clip(cx - half, 0, width)
        right = np.clip(cx + half, 0, width)
        mask = np.ones((height, width), np.float32)
        mask[top:bottom, left:right] = 0.
        # Broadcast the HW mask across channels and apply in place.
        img *= torch.from_numpy(mask).expand_as(img)
        return img
def get_randaugment(n, m, weights, bs):
    """Resolve the (n, m) code pair to an augmentation policy object.

    Codes >= 100 (with n == m) select fixed baseline policies; codes < 100 are
    genuine RandAugment parameters (n ops at magnitude m), with m == 0 meaning
    uniform augmentation (optionally weighted).

    NOTE(review): `bs` is accepted but unused here — presumably kept for
    signature compatibility with a caller; confirm before removing.
    """
    if n == 101 and m == 101:
        return autoaugment.CifarAutoAugment(fixed_posterize=False)
    if n == 102 and m == 102:
        return autoaugment.CifarAutoAugment(fixed_posterize=True)
    if n == 201 and m == 201:
        return autoaugment.SVHNAutoAugment(fixed_posterize=False)
    if n == 202 and m == 202:
        # Bug fix: code 202 previously duplicated 201 (fixed_posterize=False);
        # by analogy with the 101/102 pair, x02 selects the fixed-posterize variant.
        return autoaugment.SVHNAutoAugment(fixed_posterize=True)
    if n == 301 and m == 301:
        return fast_autoaugment.cifar10_faa
    if n == 401 and m == 401:
        return fast_autoaugment.svhn_faa
    assert m < 100 and n < 100
    if m == 0:
        if weights is not None:
            return aug_lib.UniAugmentWeighted(n, probs=weights)
        elif n == 0:
            return aug_lib.UniAugment()
        else:
            raise ValueError('Wrong RandAug Params.')
    else:
        assert n > 0 and m > 0
        return aug_lib.RandAugment(n, m)
| 2,507
| 30.35
| 72
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/deep_autoaugment.py
|
# code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import math
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
import os
import json
import hashlib
import requests
import scipy
from torchvision.transforms.transforms import Compose
random_mirror = True
##########################################################################
CIFAR_MEANS = np.array([0.49139968, 0.48215841, 0.44653091], dtype=np.float32)
# CIFAR10_STDS = np.array([0.24703223, 0.24348513, 0.26158784], dtype=np.float32)
CIFAR_STDS = np.array([0.2023, 0.1994, 0.2010], dtype=np.float32)
SVHN_MEANS = np.array([0.4379, 0.4440, 0.4729], dtype=np.float32)
SVHN_STDS = np.array([0.1980, 0.2010, 0.1970], dtype=np.float32)
IMAGENET_MEANS = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STDS = np.array([0.229, 0.224, 0.225], dtype=np.float32)
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random_mirror and random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random_mirror and random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random_mirror and random.random() > 0.5:
v = -v
v = v * img.size[0]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random_mirror and random.random() > 0.5:
v = -v
v = v * img.size[1]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateXAbs(img, v):  # absolute horizontal shift in pixels: [0, 10] (sign randomized below)
    assert 0 <= v <= 10
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateYAbs(img, v):  # absolute vertical shift in pixels: [0, 10] (sign randomized below)
    assert 0 <= v <= 10
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v): # [-30, 30]
assert -30 <= v <= 30
if random_mirror and random.random() > 0.5:
v = -v
return img.rotate(v)
def AutoContrast(img, _):
return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
return PIL.ImageOps.invert(img)
def Equalize(img, _):
return PIL.ImageOps.equalize(img)
def Flip(img, _): # not from the paper
return PIL.ImageOps.mirror(img)
def Solarize(img, v): # [0, 256]
assert 0 <= v <= 256
return PIL.ImageOps.solarize(img, v)
def Posterize(img, v): # [4, 8]
assert 4 <= v <= 8
v = int(v)
v = max(1, v)
return PIL.ImageOps.posterize(img, v)
def Posterize2(img, v): # [0, 4]
assert 0 <= v <= 4
v = int(v)
return PIL.ImageOps.posterize(img, v)
def Contrast(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
assert 0.0 <= v <= 0.2
if v <= 0.:
return img
v = v * img.size[0]
return Cutout_default(img, v)
def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
# assert 0 <= v <= 20
if v < 0:
return img
w, h = img.size
# x0 = np.random.uniform(w)
# y0 = np.random.uniform(h)
x0 = random.uniform(0, w)
y0 = random.uniform(0, h)
x0 = int(max(0, x0 - v / 2.))
y0 = int(max(0, y0 - v / 2.))
x1 = min(w, x0 + v)
y1 = min(h, y0 + v)
xy = (x0, y0, x1, y1)
# color = (125, 123, 114)
color = (0, 0, 0)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, color)
return img
def SamplePairing(imgs): # [0, 0.4]
def f(img1, v):
i = np.random.choice(len(imgs))
img2 = PIL.Image.fromarray(imgs[i])
return PIL.Image.blend(img1, img2, v)
return f
# =============== OPS for DeepAA ==============:
def mean_pad_randcrop(img, v):
    # v: Pad with mean value=[125, 123, 114] by v pixels on each side and then take random crop
    """Pad `img` by `v` pixels of mean color on every side, then take a random
    crop of the original size. `v` must be <= 10."""
    assert v <= 10, 'The maximum shift should be less then 10'
    padded_size = (img.size[0] + 2*v, img.size[1] + 2*v)
    new_img = PIL.Image.new('RGB', padded_size, color=(125, 123, 114))
    # new_img = PIL.Image.new('RGB', padded_size, color=(0, 0, 0))
    new_img.paste(img, (v, v))
    # Random crop offset within the 2v slack in each dimension.
    top = random.randint(0, v*2)
    left = random.randint(0, v*2)
    new_img = new_img.crop((left, top, left + img.size[0], top + img.size[1]))
    return new_img
def Cutout_default(img, v): # Used in FastAA, different from CutoutABS, the actual cutout size can be smaller than v on the boundary
    """Draw a gray v-by-v cutout rectangle at a random position, clipped at the
    image boundary (so the effective cutout can be smaller than v).

    Only implemented for 32x32 images with v <= 16; larger v raises.
    """
    # Passed random number generation test
    # assert 0 <= v <= 20
    if v < 0:
        return img
    w, h = img.size
    # x = np.random.uniform(w)
    # y = np.random.uniform(h)
    if v <= 16: # for cutout of cifar and SVHN
        assert w == h == 32
        x = random.uniform(0, w)
        y = random.uniform(0, h)
        x0 = int(min(w, max(0, x - v // 2))) # clip to the range (0, w)
        x1 = int(min(w, max(0, x + v // 2)))
        y0 = int(min(h, max(0, y - v // 2)))
        y1 = int(min(h, max(0, y + v // 2)))
        xy = (x0, y0, x1, y1)
        # Fill with the dataset mean color rather than black.
        color = (125, 123, 114)
        # color = (0, 0, 0)
        img = img.copy()
        PIL.ImageDraw.Draw(img).rectangle(xy, color)
        # img = CutoutAbs(img, v)
        return img
    else:
        raise NotImplementedError
def RandCrop(img, _):
v = 4
return mean_pad_randcrop(img, v)
def RandCutout(img, _):
v = 16 # Cutout 0.5 means 0.5*32=16 pixels as in the FastAA paper
return Cutout_default(img, v)
def RandCutout60(img, _):
v = 60 # Cutout 0.5 means 0.5*32=16 pixels as in the FastAA paper
return Cutout_default(img, v)
def RandFlip(img, _):
if random.random() > 0.5:
img = Flip(img, None)
return img
def Identity(img, _):
return img
# ===================== ops for imagenet =============
def RandResizeCrop_imagenet(img, _):
# ported from torchvision
# for ImageNet use only
scale = (0.08, 1.0)
ratio = (3. / 4., 4. / 3.)
size = IMAGENET_SIZE # (224, 224)
def get_params(img, scale, ratio):
width, height = img.size
area = float(width * height)
log_ratio = [math.log(r) for r in ratio]
for _ in range(10):
target_area = area * random.uniform(scale[0], scale[1])
aspect_ratio = math.exp(random.uniform(log_ratio[0], log_ratio[1]))
w = round(math.sqrt(target_area * aspect_ratio))
h = round(math.sqrt(target_area / aspect_ratio))
if 0 < w <= width and 0 < h <= height:
top = random.randint(0, height - h)
left = random.randint(0, width - w)
return left, top, w, h
# fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = round(w / min(ratio))
elif in_ratio > max(ratio):
h = height
w = round(h * max(ratio))
else:
w = width
h = height
top = (height - h) // 2
left = (width - w) // 2
return left, top, w, h
left, top, w_box, h_box = get_params(img, scale, ratio)
box = (left, top, left + w_box, top + h_box)
img = img.resize(size=size, resample=PIL.Image.CUBIC, box=box)
return img
def Resize_imagenet(img, size):
w, h = img.size
if isinstance(size, int):
short, long = (w, h) if w <= h else (h, w)
if short == size:
return img
new_short, new_long = size, int(size * long / short)
new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
return img.resize((new_w, new_h), PIL.Image.BICUBIC)
elif isinstance(size, tuple) or isinstance(size, list):
assert len(size) == 2, 'Check the size {}'.format(size)
return img.resize(size, PIL.Image.BICUBIC)
else:
raise Exception
def centerCrop_imagenet(img, _):
# for ImageNet only
# https://github.com/pytorch/vision/blob/master/torchvision/transforms/functional.py
crop_width, crop_height = IMAGENET_SIZE # (224,224)
image_width, image_height = img.size
if crop_width > image_width or crop_height > image_height:
padding_ltrb = [
(crop_width - image_width) // 2 if crop_width > image_width else 0,
(crop_height - image_height) // 2 if crop_height > image_height else 0,
(crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
(crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
]
img = pad(img, padding_ltrb, fill=0)
image_width, image_height = img.size
if crop_width == image_width and crop_height == image_height:
return img
crop_top = int(round((image_height - crop_height) / 2.))
crop_left = int(round((image_width - crop_width) / 2.))
return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))
def _parse_fill(fill, img, name="fillcolor"):
# Process fill color for affine transforms
num_bands = len(img.getbands())
if fill is None:
fill = 0
if isinstance(fill, (int, float)) and num_bands > 1:
fill = tuple([fill] * num_bands)
if isinstance(fill, (list, tuple)):
if len(fill) != num_bands:
msg = ("The number of elements in 'fill' does not match the number of "
"bands of the image ({} != {})")
raise ValueError(msg.format(len(fill), num_bands))
fill = tuple(fill)
return {name: fill}
def pad(img, padding_ltrb, fill=0, padding_mode='constant'):
    """Pad a PIL image by (left, top, right, bottom).

    'constant' mode delegates to PIL.ImageOps.expand (with palette preservation
    for mode-'P' images). Other numpy pad modes crop any negative padding first,
    then pad via np.pad and convert back to PIL.
    """
    if isinstance(padding_ltrb, list):
        padding_ltrb = tuple(padding_ltrb)
    if padding_mode == 'constant':
        opts = _parse_fill(fill, img, name='fill')
        if img.mode == 'P':
            # Palette images lose their palette through expand; restore it.
            palette = img.getpalette()
            image = PIL.ImageOps.expand(img, border=padding_ltrb, **opts)
            image.putpalette(palette)
            return image
        return PIL.ImageOps.expand(img, border=padding_ltrb, **opts)
    elif len(padding_ltrb) == 4:
        image_width, image_height = img.size
        # Negative padding means cropping that edge instead.
        cropping = -np.minimum(padding_ltrb, 0)
        if cropping.any():
            crop_left, crop_top, crop_right, crop_bottom = cropping
            img = img.crop((crop_left, crop_top, image_width - crop_right, image_height - crop_bottom))
        pad_left, pad_top, pad_right, pad_bottom = np.maximum(padding_ltrb, 0)
        if img.mode == 'P':
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
            img = PIL.Image.fromarray(img)
            img.putpalette(palette)
            return img
        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
        return PIL.Image.fromarray(img)
    else:
        raise Exception
def augment_list(for_autoaug=True, for_DeepAA_cifar=True, for_DeepAA_imagenet=True): # 16 oeprations and their ranges
l = [
(ShearX, -0.3, 0.3), # 0
(ShearY, -0.3, 0.3), # 1
(TranslateX, -0.45, 0.45), # 2
(TranslateY, -0.45, 0.45), # 3
(Rotate, -30, 30), # 4
(AutoContrast, 0, 1), # 5
(Invert, 0, 1), # 6
(Equalize, 0, 1), # 7
(Solarize, 0, 256), # 8
(Posterize, 4, 8), # 9
(Contrast, 0.1, 1.9), # 10
(Color, 0.1, 1.9), # 11
(Brightness, 0.1, 1.9), # 12
(Sharpness, 0.1, 1.9), # 13
(Cutout, 0, 0.2), # 14
# (SamplePairing(imgs), 0, 0.4), # 15
]
if for_autoaug:
l += [
(CutoutAbs, 0, 20), # compatible with auto-augment
(Posterize2, 0, 4), # 9
(TranslateXAbs, 0, 10), # 9
(TranslateYAbs, 0, 10), # 9
]
if for_DeepAA_cifar:
l += [
(Identity, 0., 1.0),
(RandFlip, 0., 1.0), # Additional 15
(RandCutout, 0., 1.0), # 16
(RandCrop, 0., 1.0), # 17
]
if for_DeepAA_imagenet:
l += [
(RandResizeCrop_imagenet, 0., 1.0),
(RandCutout60, 0., 1.0)
]
return l
augment_dict = {fn.__name__: (fn, v1, v2) for fn, v1, v2 in augment_list()}
def Cutout16(img, _):
    # Fixed 16-pixel cutout; the magnitude argument is ignored.
    # return CutoutAbs(img, 16)
    return Cutout_default(img, 16)
augmentation_TA_list = [
(Identity, 0., 1.0),
(ShearX, -0.3, 0.3), # 0
(ShearY, -0.3, 0.3), # 1
(TranslateX, -0.45, 0.45), # 2
(TranslateY, -0.45, 0.45), # 3
(Rotate, -30, 30), # 4
(AutoContrast, 0, 1), # 5
# (Invert, 0, 1), # 6
(Equalize, 0, 1), # 7
(Solarize, 0, 256), # 8
(Posterize, 4, 8), # 9
(Contrast, 0.1, 1.9), # 10
(Color, 0.1, 1.9), # 11
(Brightness, 0.1, 1.9), # 12
(Sharpness, 0.1, 1.9), # 13
(Flip, 0., 1.0), # Additional 15
(Cutout16, 0, 20), # (RandCutout, 0, 20), # compatible with auto-augment
(RandCrop, 0., 1.0), # 17
]
def get_augment(name):
    """Return the (fn, low, high) tuple registered under `name` in augment_dict;
    raises KeyError for unknown names."""
    return augment_dict[name]
def apply_augment(img, name, level):
    """Apply the named op to a copy of `img`, with `level` in [0, 1] linearly
    mapped onto the op's [low, high] magnitude range."""
    augment_fn, low, high = get_augment(name)
    magnitude = level * (high - low) + low
    return augment_fn(img.copy(), magnitude)
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone() \
.mul(alpha.view(1, 3).expand(3, 3)) \
.mul(self.eigval.view(1, 3).expand(3, 3)) \
.sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
class Augmentation_DeepAA(object):
    """Apply a learned DeepAA policy loaded from a .npz export.

    Each entry in `policy_probs` is one policy stage: a categorical
    distribution over dense (op, magnitude) pairs. __call__ samples one pair
    per stage and applies them in sequence.
    """

    def __init__(self, EXP='cifar', use_crop=False):
        self.use_crop = use_crop
        policy_data = np.load('./policy_port/policy_DeepAA_{}.npz'.format(EXP))
        self.policy_probs = policy_data['policy_probs']
        self.l_ops = policy_data['l_ops']
        self.l_mags = policy_data['l_mags']
        self.ops = policy_data['ops']
        self.mags = policy_data['mags']
        self.op_names = policy_data['op_names']

    def __call__(self, img):
        for k_policy in self.policy_probs:
            # Sample one dense (op, magnitude) index from this stage's distribution.
            k_samp = random.choices(range(len(k_policy)), weights=k_policy, k=1)[0]
            op, mag = np.squeeze(self.ops[k_samp]), np.squeeze(self.mags[k_samp]).astype(np.float32)/float(self.l_mags-1)
            # Names are stored as "OpName:suffix"; keep only the op name.
            op_name = self.op_names[op].split(':')[0]
            img = apply_augment(img, op_name, mag)
        if self.use_crop:
            w, h = img.size
            # Already at target size: nothing to crop.
            if w==IMAGENET_SIZE[0] and h==IMAGENET_SIZE[1]:
                return img
            # return centerCrop_imagenet(Resize_imagenet(img, 256), None)
            return centerCrop_imagenet(img, None)
        return img
IMAGENET_SIZE = (224, 224)
| 16,098
| 30.879208
| 133
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/utils.py
|
import torch
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import torchvision.transforms.functional as F
plt.rcParams["savefig.bbox"] = 'tight'
def save_images(imgs, dir):
    """Render one or more image tensors side by side (axes hidden) and save the
    figure to path `dir`; returns the matplotlib figure.

    NOTE(review): the parameter name `dir` shadows the builtin — kept for
    caller compatibility.
    """
    if not isinstance(imgs, list):
        imgs = [imgs]
    fix, axs = plt.subplots(ncols=len(imgs), squeeze=False)
    for i, img in enumerate(imgs):
        # Detach from autograd before converting for display.
        img = img.detach()
        img = F.to_pil_image(img)
        axs[0, i].imshow(np.asarray(img))
        axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
    fix.savefig(dir)
    return fix
| 590
| 24.695652
| 75
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/data.py
|
import logging
import os
import random
from collections import Counter
import torchvision
from PIL import Image
from torch.utils.data import SubsetRandomSampler, Sampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.dataset import ConcatDataset, Subset
from torchvision.transforms import transforms
from sklearn.model_selection import StratifiedShuffleSplit
from theconf import Config as C
from DeepAA_evaluate.augmentations import *
from DeepAA_evaluate.common import get_logger, copy_and_replace_transform, stratified_split, denormalize
from DeepAA_evaluate.imagenet import ImageNet
from DeepAA_evaluate.augmentations import Lighting
from DeepAA_evaluate.deep_autoaugment import Augmentation_DeepAA
logger = get_logger('DeepAA_evaluate')
logger.setLevel(logging.INFO)
_IMAGENET_PCA = {
'eigval': [0.2175, 0.0188, 0.0045],
'eigvec': [
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
]
}
_CIFAR_MEAN, _CIFAR_STD = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010) # these are for CIFAR 10, not for cifar100 actaully. They are pretty similar, though.
# mean für cifar 100: tensor([0.5071, 0.4866, 0.4409])
def expand(num_classes, dtype, tensor):
    """One-hot encode a 1-D label tensor into shape (N, num_classes) of the
    given dtype, allocated on the CUDA device."""
    onehot = torch.zeros(
        tensor.size(0), num_classes, dtype=dtype, device=torch.device("cuda")
    )
    return onehot.scatter(1, tensor.unsqueeze(1), 1.0)
def mixup_data(data, label, alpha):
    """Mixup: blend each sample with a randomly permuted partner.

    Returns (mixed_data, label, permuted_label, lam) where lam ~ Beta(alpha,
    alpha) when alpha > 0, else 1.0 (i.e. no mixing).
    """
    with torch.no_grad():
        lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
        perm = torch.randperm(data.size()[0]).to(data.device)
        mixed = lam * data + (1.0 - lam) * data[perm, :]
    return mixed, label, label[perm], lam
class PrefetchedWrapper(object):
    # Ref: https://github.com/NVIDIA/DeepLearningExamples/blob/d788e8d4968e72c722c5148a50a7d4692f6e7bd3/PyTorch/Classification/ConvNets/image_classification/dataloaders.py#L405
    """Wrap a DataLoader with CUDA-stream prefetching and on-GPU normalization.

    Overlaps host-to-device copies with compute by staging the next batch on a
    side stream while the current batch is yielded.
    """

    def prefetched_loader(loader, num_classes, one_hot):
        # ImageNet mean/std in 0-255 scale, shaped for NCHW broadcasting.
        mean = (
            torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255])
            .cuda()
            .view(1, 3, 1, 1)
        )
        std = (
            torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255])
            .cuda()
            .view(1, 3, 1, 1)
        )
        stream = torch.cuda.Stream()
        first = True
        for next_input, next_target in loader:
            # Stage the upcoming batch on the side stream.
            with torch.cuda.stream(stream):
                next_input = next_input.cuda(non_blocking=True)
                next_target = next_target.cuda(non_blocking=True)
                next_input = next_input.float()
                if one_hot:
                    # NOTE(review): the raise makes the expand() call below
                    # unreachable; one-hot mode is deliberately disabled here.
                    raise Exception('Currently do not use onehot encoding, becasue num_calsses==None')
                    next_target = expand(num_classes, torch.float, next_target)
                next_input = next_input.sub_(mean).div_(std)
            if not first:
                # Yield the previously staged batch while the next one copies.
                yield input, target
            else:
                first = False
            # Make the default stream wait for the staging copies to finish.
            torch.cuda.current_stream().wait_stream(stream)
            input = next_input
            target = next_target
        # Flush the final staged batch.
        yield input, target

    def __init__(self, dataloader, start_epoch, num_classes, one_hot):
        self.dataloader = dataloader
        self.epoch = start_epoch
        self.one_hot = one_hot
        self.num_classes = num_classes

    def __iter__(self):
        # Keep DistributedSampler shuffling in sync with the epoch counter.
        if self.dataloader.sampler is not None and isinstance(
            self.dataloader.sampler, torch.utils.data.distributed.DistributedSampler
        ):
            self.dataloader.sampler.set_epoch(self.epoch)
        self.epoch += 1
        return PrefetchedWrapper.prefetched_loader(
            self.dataloader, self.num_classes, self.one_hot
        )

    def __len__(self):
        return len(self.dataloader)
def get_dataloaders(dataset, batch, dataroot, split=0.15, split_idx=0, distributed=False, started_with_spawn=False, summary_writer=None):
    """Build samplers and train/valid/test dataloaders for *dataset*.

    Args:
        dataset: dataset name ('cifar10', 'cifar100', 'svhn', 'svhncore',
            'imagenet', 'ohl_pipeline_imagenet', 'smallwidth_imagenet', ...).
        batch: batch size for every loader.
        dataroot: root directory where the dataset lives / is downloaded to.
        split: fraction of the training set held out for validation;
            0 disables the validation split.
        split_idx: which fold of the stratified shuffle split to use.
        distributed: wrap the training set in a DistributedSampler.
        started_with_spawn: lowers worker counts for spawn-started processes.
        summary_writer: accepted but never read.  # NOTE(review): unused param

    Returns:
        (train_sampler, trainloader, validloader, testloader,
         test_trainloader, dataset_info) where dataset_info holds
        mean/std/img_dims/num_labels.
    """
    print(f'started with spawn {started_with_spawn}')
    dataset_info = {}
    pre_transform_train = transforms.Compose([])
    # ---- per-dataset transforms and metadata -------------------------------
    if 'cifar' in dataset and (C.get()['aug'] in ['DeepAA']):
        # DeepAA supplies its own crop/flip policy, so none are added here.
        transform_train = transforms.Compose([
            # transforms.RandomCrop(32, padding=4),
            # transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        dataset_info['mean'] = _CIFAR_MEAN
        dataset_info['std'] = _CIFAR_STD
        dataset_info['img_dims'] = (3,32,32)
        dataset_info['num_labels'] = 100 if '100' in dataset and 'ten' not in dataset else 10
    elif 'cifar' in dataset:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        dataset_info['mean'] = _CIFAR_MEAN
        dataset_info['std'] = _CIFAR_STD
        dataset_info['img_dims'] = (3,32,32)
        dataset_info['num_labels'] = 100 if '100' in dataset and 'ten' not in dataset else 10
    elif 'pre_transform_cifar' in dataset:
        # NOTE(review): dead branch — any name containing 'pre_transform_cifar'
        # also contains 'cifar', so one of the branches above always wins and
        # pre_transform_train stays an empty Compose for those datasets.
        pre_transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),])
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        dataset_info['mean'] = _CIFAR_MEAN
        dataset_info['std'] = _CIFAR_STD
        dataset_info['img_dims'] = (3, 32, 32)
        dataset_info['num_labels'] = 100 if '100' in dataset and 'ten' not in dataset else 10
    elif 'svhn' in dataset:
        svhn_mean = [0.4379, 0.4440, 0.4729]
        svhn_std = [0.1980, 0.2010, 0.1970]
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(svhn_mean, svhn_std),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(svhn_mean, svhn_std),
        ])
        dataset_info['mean'] = svhn_mean
        dataset_info['std'] = svhn_std
        dataset_info['img_dims'] = (3, 32, 32)
        dataset_info['num_labels'] = 10
    elif 'imagenet' in dataset and C.get()['aug'] in ['DeepAA']:
        # No resize/crop here: DeepAA's own pipeline handles cropping (see
        # use_crop below).
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # Image size (224, 224) instead of (224, 244) in TA
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [0.229, 0.224, 0.225]
        dataset_info['img_dims'] = (3,224,224)
        dataset_info['num_labels'] = 1000
    elif 'imagenet' in dataset and C.get()['aug']=='inception':
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop((224,224), scale=(0.08, 1.0), interpolation=Image.BICUBIC), # Image size (224, 224) instead of (224, 244) in TA
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
            ),
            transforms.ToTensor(),
            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [0.229, 0.224, 0.225]
        dataset_info['img_dims'] = (3,224,224)
        dataset_info['num_labels'] = 1000
    elif 'smallwidth_imagenet' in dataset:
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop((224,224), scale=(0.08, 1.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
            ),
            transforms.ToTensor(),
            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [0.229, 0.224, 0.225]
        dataset_info['img_dims'] = (3,224,224)
        dataset_info['num_labels'] = 1000
    elif 'ohl_pipeline_imagenet' in dataset:
        # Crop/flip happen *before* the learned augmentation (OHL pipeline);
        # std of 1 leaves pixel values unscaled after mean subtraction.
        pre_transform_train = transforms.Compose([
            transforms.RandomResizedCrop((224, 224), scale=(0.08, 1.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
        ])
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[1.,1.,1.])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[1.,1.,1.])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [1.,1.,1.]
        dataset_info['img_dims'] = (3,224,224)
        dataset_info['num_labels'] = 1000
    elif 'largewidth_imagenet' in dataset:
        # Crops to height 224, width 244 (the (224, 244) size from TA).
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop((224, 244), scale=(0.08, 1.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
            ),
            transforms.ToTensor(),
            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224, 244)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [0.229, 0.224, 0.225]
        dataset_info['img_dims'] = (3, 224, 244)
        dataset_info['num_labels'] = 1000
    else:
        raise ValueError('dataset=%s' % dataset)
    logger.debug('augmentation: %s' % C.get()['aug'])
    # ---- prepend the configured augmentation policy ------------------------
    if C.get()['aug'] == 'randaugment':
        assert not C.get()['randaug'].get('corrected_sample_space') and not C.get()['randaug'].get('google_augmentations')
        transform_train.transforms.insert(0, get_randaugment(n=C.get()['randaug']['N'], m=C.get()['randaug']['M'],
                                                             weights=C.get()['randaug'].get('weights',None), bs=C.get()['batch']))
    elif C.get()['aug'] in ['default', 'inception', 'inception320']:
        pass
    elif C.get()['aug'] in ['DeepAA']:
        transform_train.transforms.insert(0, Augmentation_DeepAA(EXP = C.get()['deepaa']['EXP'],
                                                                 use_crop = ('imagenet' in dataset) and C.get()['aug'] == 'DeepAA'
                                                                 ))
    else:
        raise ValueError('not found augmentations. %s' % C.get()['aug'])
    # Pre-transform (crop/flip before the policy) goes first in the pipeline.
    transform_train.transforms.insert(0, pre_transform_train)
    if C.get()['cutout'] > 0:
        transform_train.transforms.append(CutoutDefault(C.get()['cutout']))
    # With an external preprocessor, the dataset only crops and emits raw
    # HWC byte tensors; normalization happens downstream.
    if 'preprocessor' in C.get():
        if 'imagenet' in dataset:
            print("Only using cropping/centering transforms on dataset, since preprocessor active.")
            transform_train = transforms.Compose([
                transforms.RandomResizedCrop(224, scale=(0.08, 1.0), interpolation=Image.BICUBIC),
                PILImageToHWCByteTensor(),
            ])
            transform_test = transforms.Compose([
                transforms.Resize(256, interpolation=Image.BICUBIC),
                transforms.CenterCrop(224),
                PILImageToHWCByteTensor(),
            ])
        else:
            print("Not using any transforms in dataset, since preprocessor is active.")
            transform_train = PILImageToHWCByteTensor()
            transform_test = PILImageToHWCByteTensor()
    # ---- dataset objects ----------------------------------------------------
    if dataset in ('cifar10', 'pre_transform_cifar10'):
        total_trainset = torchvision.datasets.CIFAR10(root=dataroot, train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR10(root=dataroot, train=False, download=True, transform=transform_test)
    elif dataset in ('cifar100', 'pre_transform_cifar100'):
        total_trainset = torchvision.datasets.CIFAR100(root=dataroot, train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR100(root=dataroot, train=False, download=True, transform=transform_test)
    elif dataset == 'svhncore':
        # 'svhncore' = train split only, without the 'extra' images.
        total_trainset = torchvision.datasets.SVHN(root=dataroot, split='train', download=True,
                                                   transform=transform_train)
        testset = torchvision.datasets.SVHN(root=dataroot, split='test', download=True, transform=transform_test)
    elif dataset == 'svhn':
        trainset = torchvision.datasets.SVHN(root=dataroot, split='train', download=True, transform=transform_train)
        extraset = torchvision.datasets.SVHN(root=dataroot, split='extra', download=True, transform=transform_train)
        total_trainset = ConcatDataset([trainset, extraset])
        testset = torchvision.datasets.SVHN(root=dataroot, split='test', download=True, transform=transform_test)
    elif dataset in ('imagenet', 'ohl_pipeline_imagenet', 'smallwidth_imagenet'):
        # Ignore archive only means to not to try to extract the files again, because they already are and the zip files
        # are not there no more
        total_trainset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), transform=transform_train, ignore_archive=True)
        testset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), split='val', transform=transform_test, ignore_archive=True)
        # compatibility
        total_trainset.targets = [lb for _, lb in total_trainset.samples]
    else:
        raise ValueError('invalid dataset name=%s' % dataset)
    # ---- optionally throw away (or repurpose as val) a share of the train set
    if 'throwaway_share_of_ds' in C.get():
        assert 'val_step_trainloader_val_share' not in C.get()
        share = C.get()['throwaway_share_of_ds']['throwaway_share']
        train_subset_inds, rest_inds = stratified_split(total_trainset.targets if hasattr(total_trainset, 'targets') else list(total_trainset.labels),share)
        if C.get()['throwaway_share_of_ds']['use_throwaway_as_val']:
            testset = copy_and_replace_transform(Subset(total_trainset, rest_inds), transform_test)
        total_trainset = Subset(total_trainset, train_subset_inds)
    # ---- train/valid split samplers ----------------------------------------
    train_sampler = None
    if split > 0.0:
        # Advance to the requested fold of the stratified split.
        sss = StratifiedShuffleSplit(n_splits=5, test_size=split, random_state=0)
        sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
        for _ in range(split_idx + 1):
            train_idx, valid_idx = next(sss)
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetSampler(valid_idx)
    else:
        valid_sampler = SubsetSampler([])
    if distributed:
        assert split == 0.0, "Split not supported for distributed training."
        if C.get().get('all_workers_use_the_same_batches', False):
            # Every rank sees identical batches (single-replica sampler).
            train_sampler = DistributedSampler(total_trainset, num_replicas=1, rank=0)
        else:
            train_sampler = DistributedSampler(total_trainset)
        test_sampler = None
        test_train_sampler = None # if these are specified, acc/loss computation is wrong for results.
        # while one has to say, that this setting leads to the test sets being computed seperately on each gpu which
        # might be considered not-very-climate-friendly
    else:
        test_sampler = None
        test_train_sampler = None
    # ---- dataloaders --------------------------------------------------------
    trainloader = torch.utils.data.DataLoader(
        total_trainset, batch_size=batch, shuffle=train_sampler is None, num_workers= os.cpu_count()//8 if distributed else 32, # fix the data laoder
        pin_memory=True,
        sampler=train_sampler, drop_last=True, persistent_workers=True)
    validloader = torch.utils.data.DataLoader(
        total_trainset, batch_size=batch, shuffle=False, num_workers=0 if started_with_spawn else 8, pin_memory=True,
        sampler=valid_sampler, drop_last=False)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=batch, shuffle=False, num_workers=16 if started_with_spawn else 8, pin_memory=True,
        drop_last=False, sampler=test_sampler, persistent_workers=True
    )
    # We use this 'hacky' solution s.t. we do not need to keep the dataset twice in memory.
    test_total_trainset = copy_and_replace_transform(total_trainset, transform_test)
    test_trainloader = torch.utils.data.DataLoader(
        test_total_trainset, batch_size=batch, shuffle=False, num_workers=0 if started_with_spawn else 8, pin_memory=True,
        drop_last=False, sampler=test_train_sampler
    )
    test_trainloader.denorm = lambda x: denormalize(x, dataset_info['mean'], dataset_info['std'])
    return train_sampler, trainloader, validloader, testloader, test_trainloader, dataset_info
    # trainloader_prefetch = PrefetchedWrapper(trainloader, start_epoch=0, num_classes=None, one_hot=False)
    # testloader_prefetch = PrefetchedWrapper(testloader, start_epoch=0, num_classes=None, one_hot=False)
    # return train_sampler, trainloader_prefetch, validloader, testloader_prefetch, test_trainloader, dataset_info
class SubsetSampler(Sampler):
    """Samples the given indices in their original, fixed order.

    Unlike ``SubsetRandomSampler`` there is no shuffling, which makes it
    suitable for deterministic validation passes.

    Arguments:
        indices (sequence): a sequence of indices
    """

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
| 19,585
| 44.761682
| 176
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/fast_autoaugment.py
|
# code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from torchvision.transforms.transforms import Compose
random_mirror = True
def ShearX(img, v):  # [-0.3, 0.3]
    """Shear *img* horizontally by factor v; sign is randomly flipped."""
    assert -0.3 <= v <= 0.3
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))


def ShearY(img, v):  # [-0.3, 0.3]
    """Shear *img* vertically by factor v; sign is randomly flipped."""
    assert -0.3 <= v <= 0.3
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate horizontally by fraction v of the image width (sign random)."""
    assert -0.45 <= v <= 0.45
    if random_mirror and random.random() > 0.5:
        v = -v
    v = v * img.size[0]  # fraction -> pixels
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))


def TranslateY(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate vertically by fraction v of the image height (sign random)."""
    assert -0.45 <= v <= 0.45
    if random_mirror and random.random() > 0.5:
        v = -v
    v = v * img.size[1]  # fraction -> pixels
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateXAbs(img, v):  # absolute pixels: [0, 10]
    """Translate horizontally by v pixels; sign is randomly flipped."""
    assert 0 <= v <= 10
    if random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))


def TranslateYAbs(img, v):  # absolute pixels: [0, 10]
    """Translate vertically by v pixels; sign is randomly flipped."""
    assert 0 <= v <= 10
    if random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v):  # [-30, 30]
    """Rotate *img* by v degrees; direction is randomly flipped."""
    assert -30 <= v <= 30
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.rotate(v)
# Magnitude-free ops: the second argument is ignored.

def AutoContrast(img, _):
    return PIL.ImageOps.autocontrast(img)


def Invert(img, _):
    return PIL.ImageOps.invert(img)


def Equalize(img, _):
    return PIL.ImageOps.equalize(img)


def Flip(img, _):  # not from the paper
    """Mirror the image left-to-right (deterministic, unlike RandFlip)."""
    return PIL.ImageOps.mirror(img)
def Solarize(img, v):  # [0, 256]
    """Invert all pixel values above threshold v."""
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)


def Posterize(img, v):  # [4, 8]
    """Reduce each channel to int(v) bits."""
    assert 4 <= v <= 8
    v = int(v)
    return PIL.ImageOps.posterize(img, v)


def Posterize2(img, v):  # [0, 4]
    """Posterize variant with the lower bit range used by AutoAugment."""
    assert 0 <= v <= 4
    v = int(v)
    return PIL.ImageOps.posterize(img, v)


# Enhancement ops: v = 1.0 is the identity, v < 1 weakens, v > 1 strengthens.

def Contrast(img, v):  # [0.1,1.9]
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Contrast(img).enhance(v)


def Color(img, v):  # [0.1,1.9]
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Color(img).enhance(v)


def Brightness(img, v):  # [0.1,1.9]
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Brightness(img).enhance(v)


def Sharpness(img, v):  # [0.1,1.9]
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Cut out a gray square whose side is fraction v of the image width."""
    assert 0.0 <= v <= 0.2
    if v <= 0.:
        return img
    v = v * img.size[0]  # fraction -> pixels
    return CutoutAbs(img, v)


def CutoutAbs(img, v):  # v in absolute pixels
    """Paint a gray square of side <= v at a uniformly random position.

    The square is clipped to the image bounds; the original image is not
    modified (a copy is returned).
    """
    # assert 0 <= v <= 20
    if v < 0:
        return img
    w, h = img.size
    x0 = np.random.uniform(w)
    y0 = np.random.uniform(h)
    # (x0, y0) becomes the top-left corner, centered on the sampled point.
    x0 = int(max(0, x0 - v / 2.))
    y0 = int(max(0, y0 - v / 2.))
    x1 = min(w, x0 + v)
    y1 = min(h, y0 + v)
    xy = (x0, y0, x1, y1)
    color = (125, 123, 114)  # approximate CIFAR mean color
    # color = (0, 0, 0)
    img = img.copy()
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
def SamplePairing(imgs):  # [0, 0.4]
    """Return an op that blends the input with a random image from *imgs*.

    *imgs* is captured as the pool of candidate arrays; the returned function
    matches the (img, v) op signature, with v as the blend weight.
    """
    def f(img1, v):
        i = np.random.choice(len(imgs))
        img2 = PIL.Image.fromarray(imgs[i])
        return PIL.Image.blend(img1, img2, v)

    return f
# =============== OPS for DeepAA ==============:
def mean_pad_randcrop(img, v):
    # v: Pad with mean value=[125, 123, 114] by v pixels on each side and then take random crop
    assert v <= 10, 'The maximum shift should be less then 10'
    padded_size = (img.size[0] + 2*v, img.size[1] + 2*v)
    new_img = PIL.Image.new('RGB', padded_size, color=(125, 123, 114))
    new_img.paste(img, (v, v))
    # Random crop offset in [0, 2v] on each axis; result keeps the input size.
    top = random.randint(0, v*2)
    left = random.randint(0, v*2)
    new_img = new_img.crop((left, top, left + img.size[0], top + img.size[1]))
    return new_img
def Cutout_default(img, v):  # Used in FastAA, different from CutoutABS, the actual cutout size can be smaller than v on the boundary
    """Cut out a gray square of side ~v centered on a random point.

    Because the center (not the corner) is clamped to the image, the visible
    patch shrinks near the boundary — unlike CutoutAbs.
    """
    # Passed random number generation test
    # assert 0 <= v <= 20
    if v < 0:
        return img
    w, h = img.size
    # x = np.random.uniform(w)
    # y = np.random.uniform(h)
    x = random.uniform(0, w)
    y = random.uniform(0, h)
    x0 = int(min(w, max(0, x - v // 2)))  # clip to the range (0, w)
    x1 = int(min(w, max(0, x + v // 2)))
    y0 = int(min(h, max(0, y - v // 2)))
    y1 = int(min(h, max(0, y + v // 2)))
    xy = (x0, y0, x1, y1)
    color = (125, 123, 114)  # approximate CIFAR mean color
    # color = (0, 0, 0)
    img = img.copy()
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
# Fixed-magnitude wrappers matching the (img, v) op signature; v is ignored.

def RandCrop(img, _):
    """Pad by 4 pixels (mean color) and take a random 32x32-equivalent crop."""
    v = 4
    return mean_pad_randcrop(img, v)


def RandCutout(img, _):
    """Cutout with a fixed 16-pixel patch."""
    v = 16  # Cutout 0.5 means 0.5*32=16 pixels as in the FastAA paper
    return Cutout_default(img, v)


def RandFlip(img, _):
    """Horizontal flip with probability 0.5."""
    if random.random() > 0.5:
        img = Flip(img, None)
    return img


def Identity(img, _):
    return img
def augment_list(for_autoaug=True, for_DeepAA=False):
    """Return the available augmentations as (fn, low, high) tuples.

    Args:
        for_autoaug: also include the AutoAugment-compatible extras
            (CutoutAbs, Posterize2, TranslateXAbs, TranslateYAbs).
        for_DeepAA: also include the DeepAA ops
            (Identity, RandFlip, RandCutout, RandCrop).
    """
    ops = [
        (ShearX, -0.3, 0.3),  # 0
        (ShearY, -0.3, 0.3),  # 1
        (TranslateX, -0.45, 0.45),  # 2
        (TranslateY, -0.45, 0.45),  # 3
        (Rotate, -30, 30),  # 4
        (AutoContrast, 0, 1),  # 5
        (Invert, 0, 1),  # 6
        (Equalize, 0, 1),  # 7
        (Solarize, 0, 256),  # 8
        (Posterize, 4, 8),  # 9
        (Contrast, 0.1, 1.9),  # 10
        (Color, 0.1, 1.9),  # 11
        (Brightness, 0.1, 1.9),  # 12
        (Sharpness, 0.1, 1.9),  # 13
        (Cutout, 0, 0.2),  # 14
        # (SamplePairing(imgs), 0, 0.4),  # 15
    ]
    if for_autoaug:
        ops.extend([
            (CutoutAbs, 0, 20),  # compatible with auto-augment
            (Posterize2, 0, 4),
            (TranslateXAbs, 0, 10),
            (TranslateYAbs, 0, 10),
        ])
    if for_DeepAA:
        ops.extend([
            (Identity, 0., 1.0),
            (RandFlip, 0., 1.0),
            (RandCutout, 0., 1.0),
            (RandCrop, 0., 1.0),
        ])
    return ops
# Name -> (fn, low, high) lookup built from the default augmentation list.
augment_dict = {fn.__name__: (fn, v1, v2) for fn, v1, v2 in augment_list()}


def get_augment(name):
    """Look up an augmentation tuple (fn, low, high) by function name."""
    return augment_dict[name]


def apply_augment(img, name, level):
    """Apply augmentation *name* to a copy of *img*.

    *level* in [0, 1] is mapped linearly onto the op's [low, high] range.
    """
    augment_fn, low, high = get_augment(name)
    return augment_fn(img.copy(), level * (high - low) + low)
class Lighting(object):
    """AlexNet-style PCA-based lighting noise.

    Adds a random linear combination of the RGB PCA eigenvectors to every
    pixel; coefficients are drawn per call from N(0, alphastd * eigval).
    A zero ``alphastd`` makes the transform a no-op.
    """

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        if self.alphastd == 0:
            return img
        # One Gaussian coefficient per principal component.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        weighted = self.eigvec.type_as(img).clone()
        weighted = weighted.mul(alpha.view(1, 3).expand(3, 3))
        weighted = weighted.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = weighted.sum(1).squeeze()
        # Broadcast the per-channel offset over the whole (3, H, W) image.
        return img.add(rgb.view(3, 1, 1).expand_as(img))
def fa_reduced_cifar10():
p = [[["Contrast", 0.8320659688593578, 0.49884310562180767], ["TranslateX", 0.41849883971249136, 0.394023086494538]], [["Color", 0.3500483749890918, 0.43355143929883955], ["Color", 0.5120716140300229, 0.7508299643325016]], [["Rotate", 0.9447932604389472, 0.29723465088990375], ["Sharpness", 0.1564936149799504, 0.47169309978091745]], [["Rotate", 0.5430015349185097, 0.6518626678905443], ["Color", 0.5694844928020679, 0.3494533005430269]], [["AutoContrast", 0.5558922032451064, 0.783136004977799], ["TranslateY", 0.683914191471972, 0.7597025305860181]], [["TranslateX", 0.03489224481658926, 0.021025488042663354], ["Equalize", 0.4788637403857401, 0.3535481281496117]], [["Sharpness", 0.6428916269794158, 0.22791511918580576], ["Contrast", 0.016014045073950323, 0.26811312269487575]], [["Rotate", 0.2972727228410451, 0.7654251516829896], ["AutoContrast", 0.16005809254943348, 0.5380523650108116]], [["Contrast", 0.5823671057717301, 0.7521166301398389], ["TranslateY", 0.9949449214751978, 0.9612671341689751]], [["Equalize", 0.8372126687702321, 0.6944127225621206], ["Rotate", 0.25393282929784755, 0.3261658365286546]], [["Invert", 0.8222011603194572, 0.6597915864008403], ["Posterize", 0.31858707654447327, 0.9541013715579584]], [["Sharpness", 0.41314621282107045, 0.9437344470879956], ["Cutout", 0.6610495837889337, 0.674411664255093]], [["Contrast", 0.780121736705407, 0.40826152397463156], ["Color", 0.344019192125256, 0.1942922781355767]], [["Rotate", 0.17153139555621344, 0.798745732456474], ["Invert", 0.6010555860501262, 0.320742172554767]], [["Invert", 0.26816063450777416, 0.27152062163148327], ["Equalize", 0.6786829200236982, 0.7469412443514213]], [["Contrast", 0.3920564414367518, 0.7493644582838497], ["TranslateY", 0.8941657805606704, 0.6580846856375955]], [["Equalize", 0.875509207399372, 0.9061130537645283], ["Cutout", 0.4940280679087308, 0.7896229623628276]], [["Contrast", 0.3331423298065147, 0.7170041362529597], ["ShearX", 0.7425484291842793, 0.5285117152426109]], [["Equalize", 
0.97344237365026, 0.4745759720473106], ["TranslateY", 0.055863458430295276, 0.9625142022954672]], [["TranslateX", 0.6810614083109192, 0.7509937355495521], ["TranslateY", 0.3866463019475701, 0.5185481505576112]], [["Sharpness", 0.4751529944753671, 0.550464012488733], ["Cutout", 0.9472914750534814, 0.5584925992985023]], [["Contrast", 0.054606784909375095, 0.17257080196712182], ["Cutout", 0.6077026782754803, 0.7996504165944938]], [["ShearX", 0.328798428243695, 0.2769563264079157], ["Cutout", 0.9037632437023772, 0.4915809476763595]], [["Cutout", 0.6891202672363478, 0.9951490996172914], ["Posterize", 0.06532762462628705, 0.4005246609075227]], [["TranslateY", 0.6908583592523334, 0.725612120376128], ["Rotate", 0.39907735501746666, 0.36505798032223147]], [["TranslateX", 0.10398364107399072, 0.5913918470536627], ["Rotate", 0.7169811539340365, 0.8283850670648724]], [["ShearY", 0.9526373530768361, 0.4482347365639251], ["Contrast", 0.4203947336351471, 0.41526799558953864]], [["Contrast", 0.24894431199700073, 0.09578870500994707], ["Solarize", 0.2273713345927395, 0.6214942914963707]], [["TranslateX", 0.06331228870032912, 0.8961907489444944], ["Cutout", 0.5110007859958743, 0.23704875994050723]], [["Cutout", 0.3769183548846172, 0.6560944580253987], ["TranslateY", 0.7201924599434143, 0.4132476526938319]], [["Invert", 0.6707431156338866, 0.11622795952464149], ["Posterize", 0.12075972752370845, 0.18024933294172307]], [["Color", 0.5010057264087142, 0.5277767327434318], ["Rotate", 0.9486115946366559, 0.31485546630220784]], [["ShearX", 0.31741302466630406, 0.1991215806270692], ["Invert", 0.3744727015523084, 0.6914113986757578]], [["Brightness", 0.40348479064392617, 0.8924182735724888], ["Brightness", 0.1973098763857779, 0.3939288933689655]], [["Color", 0.01208688664030888, 0.6055693000885217], ["Equalize", 0.433259451147881, 0.420711137966155]], [["Cutout", 0.2620018360076487, 0.11594468278143644], ["Rotate", 0.1310401567856766, 0.7244318146544101]], [["ShearX", 0.15249651845933576, 
0.35277277071866986], ["Contrast", 0.28221794032094016, 0.42036586509397444]], [["Brightness", 0.8492912150468908, 0.26386920887886056], ["Solarize", 0.8764208056263386, 0.1258195122766067]], [["ShearX", 0.8537058239675831, 0.8415101816171269], ["AutoContrast", 0.23958568830416294, 0.9889049529564014]], [["Rotate", 0.6463207930684552, 0.8750192129056532], ["Contrast", 0.6865032211768652, 0.8564981333033417]], [["Equalize", 0.8877190311811044, 0.7370995897848609], ["TranslateX", 0.9979660314391368, 0.005683998913244781]], [["Color", 0.6420017551677819, 0.6225337265571229], ["Solarize", 0.8344504978566362, 0.8332856969941151]], [["ShearX", 0.7439332981992567, 0.9747608698582039], ["Equalize", 0.6259189804002959, 0.028017478098245174]], [["TranslateY", 0.39794770293366843, 0.8482966537902709], ["Rotate", 0.9312935630405351, 0.5300586925826072]], [["Cutout", 0.8904075572021911, 0.3522934742068766], ["Equalize", 0.6431186289473937, 0.9930577962126151]], [["Contrast", 0.9183553386089476, 0.44974266209396685], ["TranslateY", 0.8193684583123862, 0.9633741156526566]], [["ShearY", 0.616078299924283, 0.19219314358924766], ["Solarize", 0.1480945914138868, 0.05922109541654652]], [["Solarize", 0.25332455064128157, 0.18853037431947994], ["ShearY", 0.9518390093954243, 0.14603930044061142]], [["Color", 0.8094378664335412, 0.37029830225408433], ["Contrast", 0.29504113617467465, 0.065096365468442]], [["AutoContrast", 0.7075167558685455, 0.7084621693458267], ["Sharpness", 0.03555539453323875, 0.5651948313888351]], [["TranslateY", 0.5969982600930229, 0.9857264201029572], ["Rotate", 0.9898628564873607, 0.1985685534926911]], [["Invert", 0.14915939942810352, 0.6595839632446547], ["Posterize", 0.768535289994361, 0.5997358684618563]], [["Equalize", 0.9162691815967111, 0.3331035307653627], ["Color", 0.8169118187605557, 0.7653910258006366]], [["Rotate", 0.43489185299530897, 0.752215269135173], ["Brightness", 0.1569828560334806, 0.8002808712857853]], [["Invert", 0.931876215328345, 
0.029428644395760872], ["Equalize", 0.6330036052674145, 0.7235531014288485]], [["ShearX", 0.5216138393704968, 0.849272958911589], ["AutoContrast", 0.19572688655120263, 0.9786551568639575]], [["ShearX", 0.9899586208275011, 0.22580547500610293], ["Brightness", 0.9831311903178727, 0.5055159610855606]], [["Brightness", 0.29179117009211486, 0.48003584672937294], ["Solarize", 0.7544252317330058, 0.05806581735063043]], [["AutoContrast", 0.8919800329537786, 0.8511261613698553], ["Contrast", 0.49199446084551035, 0.7302297140181429]], [["Cutout", 0.7079723710644835, 0.032565015538375874], ["AutoContrast", 0.8259782090388609, 0.7860708789468442]], [["Posterize", 0.9980262659801914, 0.6725084224935673], ["ShearY", 0.6195568269664682, 0.5444170291816751]], [["Posterize", 0.8687351834713217, 0.9978004914422602], ["Equalize", 0.4532646848325955, 0.6486748015710573]], [["Contrast", 0.2713928776950594, 0.15255249557027806], ["ShearY", 0.9276834387970199, 0.5266542862333478]], [["AutoContrast", 0.5240786618055582, 0.9325642258930253], ["Cutout", 0.38448627892037357, 0.21219415055662394]], [["TranslateX", 0.4299517937295352, 0.20133751201386152], ["TranslateX", 0.6753468310276597, 0.6985621035400441]], [["Rotate", 0.4006472499103597, 0.6704748473357586], ["Equalize", 0.674161668148079, 0.6528530101705237]], [["Equalize", 0.9139902833674455, 0.9015103149680278], ["Sharpness", 0.7289667720691948, 0.7623606352376232]], [["Cutout", 0.5911267429414259, 0.5953141187177585], ["Rotate", 0.5219064817468504, 0.11085141355857986]], [["TranslateX", 0.3620095133946267, 0.26194039409492476], ["Rotate", 0.3929841359545597, 0.4913406720338047]], [["Invert", 0.5175298901458896, 0.001661410821811482], ["Invert", 0.004656581318332242, 0.8157622192213624]], [["AutoContrast", 0.013609693335051465, 0.9318651749409604], ["Invert", 0.8980844358979592, 0.2268511862780368]], [["ShearY", 0.7717126261142194, 0.09975547983707711], ["Equalize", 0.7808494401429572, 0.4141412091009955]], [["TranslateX", 
0.5878675721341552, 0.29813268038163376], ["Posterize", 0.21257276051591356, 0.2837285296666412]], [["Brightness", 0.4268335108566488, 0.4723784991635417], ["Cutout", 0.9386262901570471, 0.6597686851494288]], [["ShearX", 0.8259423807590159, 0.6215304795389204], ["Invert", 0.6663365779667443, 0.7729669184580387]], [["ShearY", 0.4801338723951297, 0.5220145420100984], ["Solarize", 0.9165803796596582, 0.04299335502862134]], [["Color", 0.17621114853558817, 0.7092601754635434], ["ShearX", 0.9014406936728542, 0.6028711944367818]], [["Rotate", 0.13073284972300658, 0.9088831512880851], ["ShearX", 0.4228105332316806, 0.7985249783662675]], [["Brightness", 0.9182753692730031, 0.0063635477774044436], ["Color", 0.4279825602663798, 0.28727149118585327]], [["Equalize", 0.578218285372267, 0.9611758542158054], ["Contrast", 0.5471552264150691, 0.8819635504027596]], [["Brightness", 0.3208589067274543, 0.45324733565167497], ["Solarize", 0.5218455808633233, 0.5946097503647126]], [["Equalize", 0.3790381278653, 0.8796082535775276], ["Solarize", 0.4875526773149246, 0.5186585878052613]], [["ShearY", 0.12026461479557571, 0.1336953429068397], ["Posterize", 0.34373988646025766, 0.8557727670803785]], [["Cutout", 0.2396745247507467, 0.8123036135209865], ["Equalize", 0.05022807681008945, 0.6648492261984383]], [["Brightness", 0.35226676470748264, 0.5950011514888855], ["Rotate", 0.27555076067000894, 0.9170063321486026]], [["ShearX", 0.320224630647278, 0.9683584649071976], ["Invert", 0.6905585196648905, 0.5929115667894518]], [["Color", 0.9941395717559652, 0.7474441679798101], ["Sharpness", 0.7559998478658021, 0.6656052889626682]], [["ShearY", 0.4004220568345669, 0.5737646992826074], ["Equalize", 0.9983495213746147, 0.8307907033362303]], [["Color", 0.13726809242038207, 0.9378850119950549], ["Equalize", 0.9853362454752445, 0.42670264496554156]], [["Invert", 0.13514636153298576, 0.13516363849081958], ["Sharpness", 0.2031189356693901, 0.6110226359872745]], [["TranslateX", 0.7360305209630797, 
0.41849698571655614], ["Contrast", 0.8972161549144564, 0.7820296625565641]], [["Color", 0.02713118828682548, 0.717110684828096], ["TranslateY", 0.8118759006836348, 0.9120098002024992]], [["Sharpness", 0.2915428949403711, 0.7630303724396518], ["Solarize", 0.22030536162851078, 0.38654526772661757]], [["Equalize", 0.9949114839538582, 0.7193630656062793], ["AutoContrast", 0.00889496657931299, 0.2291400476524672]], [["Rotate", 0.7120948976490488, 0.7804359309791055], ["Cutout", 0.10445418104923654, 0.8022999156052766]], [["Equalize", 0.7941710117902707, 0.8648170634288153], ["Invert", 0.9235642581144047, 0.23810725859722381]], [["Cutout", 0.3669397998623156, 0.42612815083245004], ["Solarize", 0.5896322046441561, 0.40525016166956795]], [["Color", 0.8389858785714184, 0.4805764176488667], ["Rotate", 0.7483931487048825, 0.4731174601400677]], [["Sharpness", 0.19006538611394763, 0.9480745790240234], ["TranslateY", 0.13904429049439282, 0.04117685330615939]], [["TranslateY", 0.9958097661701637, 0.34853788612580905], ["Cutout", 0.2235829624082113, 0.3737887095480745]], [["ShearX", 0.635453761342424, 0.6063917273421382], ["Posterize", 0.8738297843709666, 0.4893042590265556]], [["Brightness", 0.7907245198402727, 0.7082189713070691], ["Color", 0.030313003541849737, 0.6927897798493439]], [["Cutout", 0.6965622481073525, 0.8103522907758203], ["ShearY", 0.6186794303078708, 0.28640671575703547]], [["ShearY", 0.43734910588450226, 0.32549342535621517], ["ShearX", 0.08154980987651872, 0.3286764923112455]], [["AutoContrast", 0.5262462005050853, 0.8175584582465848], ["Contrast", 0.8683217097363655, 0.548776281479276]], [["ShearY", 0.03957878500311707, 0.5102350637943197], ["Rotate", 0.13794708520303778, 0.38035687712954236]], [["Sharpness", 0.634288567312677, 0.6387948309075822], ["AutoContrast", 0.13437288694693272, 0.7150448869023095]], [["Contrast", 0.5198339640088544, 0.9409429390321714], ["Cutout", 0.09489154903321972, 0.6228488803821982]], [["Equalize", 0.8955909061806043, 
0.7727336527163008], ["AutoContrast", 0.6459479564441762, 0.7065467781139214]], [["Invert", 0.07214420843537739, 0.15334721382249505], ["ShearX", 0.9242027778363903, 0.5809187849982554]], [["Equalize", 0.9144084379856188, 0.9457539278608998], ["Sharpness", 0.14337499858300173, 0.5978054365425495]], [["Posterize", 0.18894269796951202, 0.14676331276539045], ["Equalize", 0.846204299950047, 0.0720601838168885]], [["Contrast", 0.47354445405741163, 0.1793650330107468], ["Solarize", 0.9086106327264657, 0.7578807802091502]], [["AutoContrast", 0.11805466892967886, 0.6773620948318575], ["TranslateX", 0.584222568299264, 0.9475693349391936]], [["Brightness", 0.5833017701352768, 0.6892593824176294], ["AutoContrast", 0.9073141314561828, 0.5823085733964589]], [["TranslateY", 0.5711231614144834, 0.6436240447620021], ["Contrast", 0.21466964050052473, 0.8042843954486391]], [["Contrast", 0.22967904487976765, 0.2343103109298762], ["Invert", 0.5502897289159286, 0.386181060792375]], [["Invert", 0.7008423439928628, 0.4234003051405053], ["Rotate", 0.77270460187611, 0.6650852696828039]], [["Invert", 0.050618322309703534, 0.24277027926683614], ["TranslateX", 0.789703489736613, 0.5116446685339312]], [["Color", 0.363898083076868, 0.7870323584210503], ["ShearY", 0.009608425513626617, 0.6188625018465327]], [["TranslateY", 0.9447601615216088, 0.8605867115798349], ["Equalize", 0.24139180127003634, 0.9587337957930782]], [["Equalize", 0.3968589440144503, 0.626206375426996], ["Solarize", 0.3215967960673186, 0.826785464835443]], [["TranslateX", 0.06947339047121326, 0.016705969558222122], ["Contrast", 0.6203392406528407, 0.6433525559906872]], [["Solarize", 0.2479835265518212, 0.6335009955617831], ["Sharpness", 0.6260191862978083, 0.18998095149428562]], [["Invert", 0.9818841924943431, 0.03252098144087934], ["TranslateY", 0.9740718042586802, 0.32038951753031475]], [["Solarize", 0.8795784664090814, 0.7014953994354041], ["AutoContrast", 0.8508018319577783, 0.09321935255338443]], [["Color", 
0.8067046326105318, 0.13732893832354054], ["Contrast", 0.7358549680271418, 0.7880588355974301]], [["Posterize", 0.5005885536838065, 0.7152229305267599], ["ShearX", 0.6714249591308944, 0.7732232697859908]], [["TranslateY", 0.5657943483353953, 0.04622399873706862], ["AutoContrast", 0.2787442688649845, 0.567024378767143]], [["ShearY", 0.7589839214283295, 0.041071003934029404], ["Equalize", 0.3719852873722692, 0.43285778682687326]], [["Posterize", 0.8841266183653291, 0.42441306955476366], ["Cutout", 0.06578801759412933, 0.5961125797961526]], [["Rotate", 0.4057875004314082, 0.20241115848366442], ["AutoContrast", 0.19331542807918067, 0.7175484678480565]], [["Contrast", 0.20331327116693088, 0.17135387852218742], ["Cutout", 0.6282459410351067, 0.6690015305529187]], [["ShearX", 0.4309850328306535, 0.99321178125828], ["AutoContrast", 0.01809604030453338, 0.693838277506365]], [["Rotate", 0.24343531125298268, 0.5326412444169899], ["Sharpness", 0.8663989992597494, 0.7643990609130789]], [["Rotate", 0.9785019204622459, 0.8941922576710696], ["ShearY", 0.3823185048761075, 0.9258854046017292]], [["ShearY", 0.5502613342963388, 0.6193478797355644], ["Sharpness", 0.2212116534610532, 0.6648232390110979]], [["TranslateY", 0.43222920981513757, 0.5657636397633089], ["ShearY", 0.9153733286073634, 0.4868521171273169]], [["Posterize", 0.12246560519738336, 0.9132288825898972], ["Cutout", 0.6058471327881816, 0.6426901876150983]], [["Color", 0.3693970222695844, 0.038929141432555436], ["Equalize", 0.6228052875653781, 0.05064436511347281]], [["Color", 0.7172600331356893, 0.2824542634766688], ["Color", 0.425293116261649, 0.1796441283313972]], [["Cutout", 0.7539608428122959, 0.9896141728228921], ["Solarize", 0.17811081117364758, 0.9064195503634402]], [["AutoContrast", 0.6761242607012717, 0.6484842446399923], ["AutoContrast", 0.1978135076901828, 0.42166879492601317]], [["ShearY", 0.25901666379802524, 0.4770778270322449], ["Solarize", 0.7640963173407052, 0.7548463227094349]], [["TranslateY", 
0.9222487731783499, 0.33658389819616463], ["Equalize", 0.9159112511468139, 0.8877136302394797]], [["TranslateX", 0.8994836977137054, 0.11036053676846591], ["Sharpness", 0.9040333410652747, 0.007266095214664592]], [["Invert", 0.627758632524958, 0.8075245097227242], ["Color", 0.7525387912148516, 0.05950239294733184]], [["TranslateX", 0.43505193292761857, 0.38108822876120796], ["TranslateY", 0.7432578052364004, 0.685678116134759]], [["Contrast", 0.9293507582470425, 0.052266842951356196], ["Posterize", 0.45187123977747456, 0.8228290399726782]], [["ShearX", 0.07240786542746291, 0.8945667925365756], ["Brightness", 0.5305443506561034, 0.12025274552427578]], [["Invert", 0.40157564448143335, 0.5364745514006678], ["Posterize", 0.3316124671813876, 0.43002413237035997]], [["ShearY", 0.7152314630009072, 0.1938339083417453], ["Invert", 0.14102478508140615, 0.41047623580174253]], [["Equalize", 0.19862832613849246, 0.5058521685279254], ["Sharpness", 0.16481208629549782, 0.29126323102770557]], [["Equalize", 0.6951591703541872, 0.7294822018800076], ["ShearX", 0.8726656726111219, 0.3151484225786487]], [["Rotate", 0.17234370554263745, 0.9356543193000078], ["TranslateX", 0.4954374070084091, 0.05496727345849217]], [["Contrast", 0.347405480122842, 0.831553005022885], ["ShearX", 0.28946367213071134, 0.11905898704394013]], [["Rotate", 0.28096672507990683, 0.16181284050307398], ["Color", 0.6554918515385365, 0.8739728050797386]], [["Solarize", 0.05408073374114053, 0.5357087283758337], ["Posterize", 0.42457175211495335, 0.051807130609045515]], [["TranslateY", 0.6216669362331361, 0.9691341207381867], ["Rotate", 0.9833579358130944, 0.12227426932415297]], [["AutoContrast", 0.7572619475282892, 0.8062834082727393], ["Contrast", 0.1447865402875591, 0.40242646573228436]], [["Rotate", 0.7035658783466086, 0.9840285268256428], ["Contrast", 0.04613961510519471, 0.7666683217450163]], [["TranslateX", 0.4580462177951252, 0.6448678609474686], ["AutoContrast", 0.14845695613708987, 0.1581134188537895]], 
[["Color", 0.06795037145259564, 0.9115552821158709], ["TranslateY", 0.9972953449677655, 0.6791016521791214]], [["Cutout", 0.3586908443690823, 0.11578558293480945], ["Color", 0.49083981719164294, 0.6924851425917189]], [["Brightness", 0.7994717831637873, 0.7887316255321768], ["Posterize", 0.01280463502435425, 0.2799086732858721]], [["ShearY", 0.6733451536131859, 0.8122332639516706], ["AutoContrast", 0.20433889615637357, 0.29023346867819966]], [["TranslateY", 0.709913512385177, 0.6538196931503809], ["Invert", 0.06629795606579203, 0.40913219547548296]], [["Sharpness", 0.4704559834362948, 0.4235993305308414], ["Equalize", 0.7578132044306966, 0.9388824249397175]], [["AutoContrast", 0.5281702802395268, 0.8077253610116979], ["Equalize", 0.856446858814119, 0.0479755681647559]], [["Color", 0.8244145826797791, 0.038409264586238945], ["Equalize", 0.4933123249234237, 0.8251940933672189]], [["TranslateX", 0.23949314158035084, 0.13576027004706692], ["ShearX", 0.8547563771688399, 0.8309262160483606]], [["Cutout", 0.4655680937486001, 0.2819807000622825], ["Contrast", 0.8439552665937905, 0.4843617871587037]], [["TranslateX", 0.19142454476784831, 0.7516148119169537], ["AutoContrast", 0.8677128351329768, 0.34967990912346336]], [["Contrast", 0.2997868299880966, 0.919508054854469], ["AutoContrast", 0.3003418493384957, 0.812314984368542]], [["Invert", 0.1070424236198183, 0.614674386498809], ["TranslateX", 0.5010973510899923, 0.20828478805259465]], [["Contrast", 0.6775882415611454, 0.6938564815591685], ["Cutout", 0.4814634264207498, 0.3086844939744179]], [["TranslateY", 0.939427105020265, 0.02531043619423201], ["Contrast", 0.793754257944812, 0.6676072472565451]], [["Sharpness", 0.09833672397575444, 0.5937214638292085], ["Rotate", 0.32530675291753763, 0.08302275740932441]], [["Sharpness", 0.3096455511562728, 0.6726732004553959], ["TranslateY", 0.43268997648796537, 0.8755012330217743]], [["ShearY", 0.9290771880324833, 0.22114736271319912], ["Equalize", 0.5520199288501478, 
0.34269650332060553]], [["AutoContrast", 0.39763980746649374, 0.4597414582725454], ["Contrast", 0.941507852412761, 0.24991270562477041]], [["Contrast", 0.19419400547588095, 0.9127524785329233], ["Invert", 0.40544905179551727, 0.770081532844878]], [["Invert", 0.30473757368608334, 0.23534811781828846], ["Cutout", 0.26090722356706686, 0.5478390909877727]], [["Posterize", 0.49434361308057373, 0.05018423270527428], ["Color", 0.3041910676883317, 0.2603810415446437]], [["Invert", 0.5149061746764011, 0.9507449210221298], ["TranslateY", 0.4458076521892904, 0.8235358255774426]], [["Cutout", 0.7900006753351625, 0.905578861382507], ["Cutout", 0.6707153655762056, 0.8236715672258502]], [["Solarize", 0.8750534386579575, 0.10337670467100568], ["Posterize", 0.6102379615481381, 0.9264503915416868]], [["ShearY", 0.08448689377082852, 0.13981233725811626], ["TranslateX", 0.13979689669329498, 0.768774869872818]], [["TranslateY", 0.35752572266759985, 0.22827299847812488], ["Solarize", 0.3906957174236011, 0.5663314388307709]], [["ShearY", 0.29155240367061563, 0.8427516352971683], ["ShearX", 0.988825367441916, 0.9371258864857649]], [["Posterize", 0.3470780859769458, 0.5467686612321239], ["Rotate", 0.5758606274160093, 0.8843838082656007]], [["Cutout", 0.07825368363221841, 0.3230799425855425], ["Equalize", 0.2319163865298529, 0.42133965674727325]], [["Invert", 0.41972172597448654, 0.34618622513582953], ["ShearX", 0.33638469398198834, 0.9098575535928108]], [["Invert", 0.7322652233340448, 0.7747502957687412], ["Cutout", 0.9643121397298106, 0.7983335094634907]], [["TranslateY", 0.30039942808098496, 0.229018798182827], ["TranslateY", 0.27009499739380194, 0.6435577237846236]], [["Color", 0.38245274994070644, 0.7030758568461645], ["ShearX", 0.4429321461666281, 0.6963787864044149]], [["AutoContrast", 0.8432798685515605, 0.5775214369578088], ["Brightness", 0.7140899735355927, 0.8545854720117658]], [["Rotate", 0.14418935535613786, 0.5637968282213426], ["Color", 0.7115231912479835, 
0.32584796564566776]], [["Sharpness", 0.4023501062807533, 0.4162097130412771], ["Brightness", 0.5536372686153666, 0.03004023273348777]], [["TranslateX", 0.7526053265574295, 0.5365938133399961], ["Cutout", 0.07914142706557492, 0.7544953091603148]], [["TranslateY", 0.6932934644882822, 0.5302211727137424], ["Invert", 0.5040606028391255, 0.6074863635108957]], [["Sharpness", 0.5013938602431629, 0.9572417724333157], ["TranslateY", 0.9160516359783026, 0.41798927975391675]], [["ShearY", 0.5130018836722556, 0.30209438428424185], ["Color", 0.15017170588500262, 0.20653495360587826]], [["TranslateX", 0.5293300090022314, 0.6407011888285266], ["Rotate", 0.4809817860439001, 0.3537850070371702]], [["Equalize", 0.42243081336551014, 0.13472721311046565], ["Posterize", 0.4700309639484068, 0.5197704360874883]], [["AutoContrast", 0.40674959899687235, 0.7312824868168921], ["TranslateX", 0.7397527975920833, 0.7068339877944815]], [["TranslateY", 0.5880995184787206, 0.41294111378078946], ["ShearX", 0.3181387627799316, 0.4810010147143413]], [["Color", 0.9898680233928507, 0.13241525577655167], ["Contrast", 0.9824932511238534, 0.5081145010853807]], [["Invert", 0.1591854062582687, 0.9760371953250404], ["Color", 0.9913399302056851, 0.8388709501056177]], [["Rotate", 0.6427451962231163, 0.9486793975292853], ["AutoContrast", 0.8501937877930463, 0.021326757974406196]], [["Contrast", 0.13611684531087598, 0.3050858709483848], ["Posterize", 0.06618644756084646, 0.8776928511951034]], [["TranslateX", 0.41021065663839407, 0.4965319749091702], ["Rotate", 0.07088831484595115, 0.4435516708223345]], [["Sharpness", 0.3151707977154323, 0.28275482520179296], ["Invert", 0.36980384682133804, 0.20813616084536624]], [["Cutout", 0.9979060206661017, 0.39712948644725854], ["Brightness", 0.42451052896163466, 0.942623075649937]], [["Equalize", 0.5300853308425644, 0.010183500830128867], ["AutoContrast", 0.06930788523716991, 0.5403125318991522]], [["Contrast", 0.010385458959237814, 0.2588311035539086], ["ShearY", 
0.9347048553928764, 0.10439028366854963]], [["ShearY", 0.9867649486508592, 0.8409258132716434], ["ShearX", 0.48031199530836444, 0.7703375364614137]], [["ShearY", 0.04835889473136512, 0.2671081675890492], ["Brightness", 0.7856432618509617, 0.8032169570159564]], [["Posterize", 0.11112884927351185, 0.7116956530752987], ["TranslateY", 0.7339151092128607, 0.3331241226029017]], [["Invert", 0.13527036207875454, 0.8425980515358883], ["Color", 0.7836395778298139, 0.5517059252678862]], [["Sharpness", 0.012541163521491816, 0.013197550692292892], ["Invert", 0.6295957932861318, 0.43276521236056054]], [["AutoContrast", 0.7681480991225756, 0.3634284648496289], ["Brightness", 0.09708289828517969, 0.45016725043529726]], [["Brightness", 0.5839450499487329, 0.47525965678316795], ["Posterize", 0.43096581990183735, 0.9332382960125196]], [["Contrast", 0.9725334964552795, 0.9142902966863341], ["Contrast", 0.12376116410622995, 0.4355916974126801]], [["TranslateX", 0.8572708473690132, 0.02544522678265526], ["Sharpness", 0.37902120723460364, 0.9606092969833118]], [["TranslateY", 0.8907359001296927, 0.8011363927236099], ["Color", 0.7693777154407178, 0.0936768686746503]], [["Equalize", 0.0002657688243309364, 0.08190798535970034], ["Rotate", 0.5215478065240905, 0.5773519995038368]], [["TranslateY", 0.3383007813932477, 0.5733428274739165], ["Sharpness", 0.2436110797174722, 0.4757790814590501]], [["Cutout", 0.0957402176213592, 0.8914395928996034], ["Cutout", 0.4959915628586883, 0.25890349461645246]], [["AutoContrast", 0.594787300189186, 0.9627455357634459], ["ShearY", 0.5136027621132064, 0.10419602450259002]], [["Solarize", 0.4684077211553732, 0.6592850629431414], ["Sharpness", 0.2382385935956325, 0.6589291408243176]], [["Cutout", 0.4478786947325877, 0.6893616643143388], ["TranslateX", 0.2761781720270474, 0.21750622627277727]], [["Sharpness", 0.39476077929016484, 0.930902796668923], ["Cutout", 0.9073012208742808, 0.9881122386614257]], [["TranslateY", 0.0933719180021565, 0.7206252503441172], 
["ShearX", 0.5151400441789256, 0.6307540083648309]], [["AutoContrast", 0.7772689258806401, 0.8159317013156503], ["AutoContrast", 0.5932793713915097, 0.05262217353927168]], [["Equalize", 0.38017352056118914, 0.8084724050448412], ["ShearY", 0.7239725628380852, 0.4246314890359326]], [["Cutout", 0.741157483503503, 0.13244380646497977], ["Invert", 0.03395378056675935, 0.7140036618098844]], [["Rotate", 0.0662727247460636, 0.7099861732415447], ["Rotate", 0.3168532707508249, 0.3553167425022127]], [["AutoContrast", 0.7429303516734129, 0.07117444599776435], ["Posterize", 0.5379537435918104, 0.807221330263993]], [["TranslateY", 0.9788586874795164, 0.7967243851346594], ["Invert", 0.4479103376922362, 0.04260360776727545]], [["Cutout", 0.28318121763188997, 0.7748680701406292], ["AutoContrast", 0.9109258369403016, 0.17126397858002085]], [["Color", 0.30183727885272027, 0.46718354750112456], ["TranslateX", 0.9628952256033627, 0.10269543754135535]], [["AutoContrast", 0.6316709389784041, 0.84287698792044], ["Brightness", 0.5544761629904337, 0.025264772745200004]], [["Rotate", 0.08803313299532567, 0.306059720523696], ["Invert", 0.5222165872425064, 0.045935208620454304]], [["TranslateY", 0.21912346831923835, 0.48529224559004436], ["TranslateY", 0.15466734731903942, 0.8929485418495068]], [["ShearX", 0.17141022847016563, 0.8607600402165531], ["ShearX", 0.6890511341106859, 0.7540899265679949]], [["Invert", 0.9417455522972059, 0.9021733684991224], ["Solarize", 0.7693107057723746, 0.7268007946568782]], [["Posterize", 0.02376991543373752, 0.6768442864453844], ["Rotate", 0.7736875065112697, 0.6706331753139825]], [["Contrast", 0.3623841610390669, 0.15023657344457686], ["Equalize", 0.32975472189318666, 0.05629246869510651]], [["Sharpness", 0.7874882420165824, 0.49535778020457066], ["Posterize", 0.09485578893387558, 0.6170768580482466]], [["Brightness", 0.7099280202949585, 0.021523012961427335], ["Posterize", 0.2076371467666719, 0.17168118578815206]], [["Color", 0.8546367645761538, 
0.832011891505731], ["Equalize", 0.6429734783051777, 0.2618995960561532]], [["Rotate", 0.8780793721476224, 0.5920897827664297], ["ShearX", 0.5338303685064825, 0.8605424531336439]], [["Sharpness", 0.7504493806631884, 0.9723552387375258], ["Sharpness", 0.3206385634203266, 0.45127845905824693]], [["ShearX", 0.23794709526711355, 0.06257530645720066], ["Solarize", 0.9132374030587093, 0.6240819934824045]], [["Sharpness", 0.790583587969259, 0.28551171786655405], ["Contrast", 0.39872982844590554, 0.09644706751019538]], [["Equalize", 0.30681999237432944, 0.5645045018157916], ["Posterize", 0.525966242669736, 0.7360106111256014]], [["TranslateX", 0.4881014179825114, 0.6317220208872226], ["ShearX", 0.2935158995550958, 0.23104608987381758]], [["Rotate", 0.49977116738568395, 0.6610761068306319], ["TranslateY", 0.7396566602715687, 0.09386747830045217]], [["ShearY", 0.5909773790018789, 0.16229529902832718], ["Equalize", 0.06461394468918358, 0.6661349001143908]], [["TranslateX", 0.7218443721851834, 0.04435720302810153], ["Cutout", 0.986686540951642, 0.734771197038724]], [["ShearX", 0.5353800096911666, 0.8120139502148365], ["Equalize", 0.4613239578449774, 0.5159528929124512]], [["Color", 0.0871713897628631, 0.7708895183198486], ["Solarize", 0.5811386808912219, 0.35260648120785887]], [["Posterize", 0.3910857927477053, 0.4329219555775561], ["Color", 0.9115983668789468, 0.6043069944145293]], [["Posterize", 0.07493067637060635, 0.4258000066006725], ["AutoContrast", 0.4740957581389772, 0.49069587151651295]], [["Rotate", 0.34086200894268937, 0.9812149332288828], ["Solarize", 0.6801012471371733, 0.17271491146753837]], [["Color", 0.20542270872895207, 0.5532087457727624], ["Contrast", 0.2718692536563381, 0.20313287569510108]], [["Equalize", 0.05199827210980934, 0.0832859890912212], ["AutoContrast", 0.8092395764794107, 0.7778945136511004]], [["Sharpness", 0.1907689513066838, 0.7705754572256907], ["Color", 0.3911178658498049, 0.41791326933095485]], [["Solarize", 0.19611855804748257, 
0.2407807485604081], ["AutoContrast", 0.5343964972940493, 0.9034209455548394]], [["Color", 0.43586520148538865, 0.4711164626521439], ["ShearY", 0.28635408186820555, 0.8417816793020271]], [["Cutout", 0.09818482420382535, 0.1649767430954796], ["Cutout", 0.34582392911178494, 0.3927982995799828]], [["ShearX", 0.001253882705272269, 0.48661629027584596], ["Solarize", 0.9229221435457137, 0.44374894836659073]], [["Contrast", 0.6829734655718668, 0.8201750485099037], ["Cutout", 0.7886756837648936, 0.8423285219631946]], [["TranslateY", 0.857017093561528, 0.3038537151773969], ["Invert", 0.12809228606383538, 0.23637166191748027]], [["Solarize", 0.9829027723424164, 0.9723093910674763], ["Color", 0.6346495302126811, 0.5405494753107188]], [["AutoContrast", 0.06868643520377715, 0.23758659417688077], ["AutoContrast", 0.6648225411500879, 0.5618315648260103]], [["Invert", 0.44202305603311676, 0.9945938909685547], ["Equalize", 0.7991650497684454, 0.16014142656347097]], [["AutoContrast", 0.8778631604769588, 0.03951977631894088], ["ShearY", 0.8495160088963707, 0.35771447321250416]], [["Color", 0.5365078341001592, 0.21102444169782308], ["ShearX", 0.7168869678248874, 0.3904298719872734]], [["TranslateX", 0.6517203786101899, 0.6467598990650437], ["Invert", 0.26552491504364517, 0.1210812827294625]], [["Posterize", 0.35196021684368994, 0.8420648319941891], ["Invert", 0.7796829363930631, 0.9520895999240896]], [["Sharpness", 0.7391572148971984, 0.4853940393452846], ["TranslateX", 0.7641915295592839, 0.6351349057666782]], [["Posterize", 0.18485880221115913, 0.6117603277356728], ["Rotate", 0.6541660490605724, 0.5704041108375348]], [["TranslateY", 0.27517423188070533, 0.6610080904072458], ["Contrast", 0.6091250547289317, 0.7702443247557892]], [["Equalize", 0.3611798581067118, 0.6623615672642768], ["TranslateX", 0.9537265090885917, 0.06352772509358584]], [["ShearX", 0.09720029389103535, 0.7800423126320308], ["Invert", 0.30314352455858884, 0.8519925470889914]], [["Brightness", 0.06931529763458055, 
0.57760829499712], ["Cutout", 0.637251974467394, 0.7184346129191052]], [["AutoContrast", 0.5026722100286064, 0.32025257156541886], ["Contrast", 0.9667478703047919, 0.14178519432669368]], [["Equalize", 0.5924463845816984, 0.7187610262181517], ["TranslateY", 0.7059479079159405, 0.06551471830655187]], [["Sharpness", 0.18161164512332928, 0.7576138481173385], ["Brightness", 0.19191138767695282, 0.7865880269424701]], [["Brightness", 0.36780861866078696, 0.0677855546737901], ["AutoContrast", 0.8491446654142264, 0.09217782099938121]], [["TranslateY", 0.06011399855120858, 0.8374487034710264], ["TranslateY", 0.8373922962070498, 0.1991295720254297]], [["Posterize", 0.702559916122481, 0.30257509683007755], ["Rotate", 0.249899495398891, 0.9370437251176267]], [["ShearX", 0.9237874098232075, 0.26241907483351146], ["Brightness", 0.7221766836146657, 0.6880749752986671]], [["Cutout", 0.37994098189193104, 0.7836874473657957], ["ShearX", 0.9212861960976824, 0.8140948561570449]], [["Posterize", 0.2584098274786417, 0.7990847652004848], ["Invert", 0.6357731737590063, 0.1066304859116326]], [["Sharpness", 0.4412790857539922, 0.9692465283229825], ["Color", 0.9857401617339051, 0.26755393929808713]], [["Equalize", 0.22348671644912665, 0.7370019910830038], ["Posterize", 0.5396106339575417, 0.5559536849843303]], [["Equalize", 0.8742967663495852, 0.2797122599926307], ["Rotate", 0.4697322053105951, 0.8769872942579476]], [["Sharpness", 0.44279911640509206, 0.07729581896071613], ["Cutout", 0.3589177366154631, 0.2704031551235969]], [["TranslateX", 0.614216412574085, 0.47929659784170453], ["Brightness", 0.6686234118438007, 0.05700784068205689]], [["ShearY", 0.17920614630857634, 0.4699685075827862], ["Color", 0.38251870810870003, 0.7262706923005887]], [["Solarize", 0.4951799001144561, 0.212775278026479], ["TranslateX", 0.8666105646463097, 0.6750496637519537]], [["Color", 0.8110864170849051, 0.5154263861958484], ["Sharpness", 0.2489044083898776, 0.3763372541462343]], [["Cutout", 0.04888193613483871, 
0.06041664638981603], ["Color", 0.06438587718683708, 0.5797881428892969]], [["Rotate", 0.032427448352152166, 0.4445797818376559], ["Posterize", 0.4459357828482998, 0.5879865187630777]], [["ShearX", 0.1617179557693058, 0.050796802246318884], ["Cutout", 0.8142465452060423, 0.3836391305618707]], [["TranslateY", 0.1806857249209416, 0.36697730355422675], ["Rotate", 0.9897576550818276, 0.7483432452225264]], [["Brightness", 0.18278016458098223, 0.952352527690299], ["Cutout", 0.3269735224453044, 0.3924869905012752]], [["ShearX", 0.870832707718742, 0.3214743207190739], ["Cutout", 0.6805560681792573, 0.6984188155282459]], [["TranslateX", 0.4157118388833776, 0.3964216288135384], ["TranslateX", 0.3253012682285006, 0.624835513104391]], [["Contrast", 0.7678168037628158, 0.31033802162621793], ["ShearX", 0.27022424855977134, 0.3773245605126201]], [["TranslateX", 0.37812621869017593, 0.7657993810740699], ["Rotate", 0.18081890120092914, 0.8893511219618171]], [["Posterize", 0.8735859716088367, 0.18243793043074286], ["TranslateX", 0.90435994250313, 0.24116383818819453]], [["Invert", 0.06666709253664793, 0.3881076083593933], ["TranslateX", 0.3783333964963522, 0.14411014979589543]], [["Equalize", 0.8741147867162096, 0.14203839235846816], ["TranslateX", 0.7801536758037405, 0.6952401607812743]], [["Cutout", 0.6095335117944475, 0.5679026063718094], ["Posterize", 0.06433868172233115, 0.07139559616012303]], [["TranslateY", 0.3020364047315408, 0.21459810361176246], ["Cutout", 0.7097677414888889, 0.2942144632587549]], [["Brightness", 0.8223662419048653, 0.195700694016108], ["Invert", 0.09345407040803999, 0.779843655582099]], [["TranslateY", 0.7353462929356228, 0.0468520680237382], ["Cutout", 0.36530918247940425, 0.3897292909049672]], [["Invert", 0.9676896451721213, 0.24473302189463453], ["Invert", 0.7369271521408992, 0.8193267003356975]], [["Sharpness", 0.8691871972054326, 0.4441713912682772], ["ShearY", 0.47385584832119887, 0.23521684584675429]], [["ShearY", 0.9266946026184021, 
0.7611986713358834], ["TranslateX", 0.6195820760253926, 0.14661428669483678]], [["Sharpness", 0.08470870576026868, 0.3380219099907229], ["TranslateX", 0.3062343307496658, 0.7135777338095889]], [["Sharpness", 0.5246448204194909, 0.3193061215236702], ["ShearX", 0.8160637208508432, 0.9720697396582731]], [["Posterize", 0.5249259956549405, 0.3492042382504774], ["Invert", 0.8183138799547441, 0.11107271762524618]], [["TranslateY", 0.210869733350744, 0.7138905840721885], ["Sharpness", 0.7773226404450125, 0.8005353621959782]], [["Posterize", 0.33067522385556025, 0.32046239220630124], ["AutoContrast", 0.18918147708798405, 0.4646281070474484]], [["TranslateX", 0.929502026131094, 0.8029128121556285], ["Invert", 0.7319794306118105, 0.5421878712623392]], [["ShearX", 0.25645940834182723, 0.42754710760160963], ["ShearX", 0.44640695310173306, 0.8132185532296811]], [["Color", 0.018436846416536312, 0.8439313862001113], ["Sharpness", 0.3722867661453415, 0.5103570873163251]], [["TranslateX", 0.7285989086776543, 0.4809027697099264], ["TranslateY", 0.9740807004893643, 0.8241085438636939]], [["Posterize", 0.8721868989693397, 0.5700907310383815], ["Posterize", 0.4219074410577852, 0.8032643572845402]], [["Contrast", 0.9811380092558266, 0.8498397471632105], ["Sharpness", 0.8380884329421594, 0.18351306571903125]], [["TranslateY", 0.3878939366762001, 0.4699103438753077], ["Invert", 0.6055556353233807, 0.8774727658400134]], [["TranslateY", 0.052317005261018346, 0.39471450378745787], ["ShearX", 0.8612486845942395, 0.28834103278807466]], [["Color", 0.511993351208063, 0.07251427040525904], ["Solarize", 0.9898097047354855, 0.299761565689576]], [["Equalize", 0.2721248231619904, 0.6870975927455507], ["Cutout", 0.8787327242363994, 0.06228061428917098]], [["Invert", 0.8931880335225408, 0.49720931867378193], ["Posterize", 0.9619698792159256, 0.17859639696940088]], [["Posterize", 0.0061688075074411985, 0.08082938731035938], ["Brightness", 0.27745128028826993, 0.8638528796903816]], [["ShearY", 
0.9140200609222026, 0.8240421430867707], ["Invert", 0.651734417415332, 0.08871906369930926]], [["Color", 0.45585010413511196, 0.44705070078574316], ["Color", 0.26394624901633146, 0.11242877788650807]], [["ShearY", 0.9200278466372522, 0.2995901331149652], ["Cutout", 0.8445407215116278, 0.7410524214287446]], [["ShearY", 0.9950483746990132, 0.112964468262847], ["ShearY", 0.4118332303218585, 0.44839613407553636]], [["Contrast", 0.7905821952255192, 0.23360046159385106], ["Posterize", 0.8611787233956044, 0.8984260048943528]], [["TranslateY", 0.21448061359312853, 0.8228112806838331], ["Contrast", 0.8992297266152983, 0.9179231590570998]], [["Invert", 0.3924194798946006, 0.31830516468371495], ["Rotate", 0.8399556845248508, 0.3764892022932781]], [["Cutout", 0.7037916990046816, 0.9214620769502728], ["AutoContrast", 0.02913794613018239, 0.07808607528954048]], [["ShearY", 0.6041490474263381, 0.6094184590800105], ["Equalize", 0.2932954517354919, 0.5840888946081727]], [["ShearX", 0.6056801676269449, 0.6948580442549543], ["Cutout", 0.3028001021044615, 0.15117101733894078]], [["Brightness", 0.8011486803860253, 0.18864079729374195], ["Solarize", 0.014965327213230961, 0.8842620292527029]], [["Invert", 0.902244007904273, 0.5634673798052033], ["Equalize", 0.13422913507398349, 0.4110956745883727]], [["TranslateY", 0.9981773319103838, 0.09568550987216096], ["Color", 0.7627662124105109, 0.8494409737419493]], [["Cutout", 0.3013527640416782, 0.03377226729898486], ["ShearX", 0.5727964831614619, 0.8784196638222834]], [["TranslateX", 0.6050722426803684, 0.3650103962378708], ["TranslateX", 0.8392084589130886, 0.6479816470292911]], [["Rotate", 0.5032806606500023, 0.09276980118866307], ["TranslateY", 0.7800234515261191, 0.18896454379343308]], [["Invert", 0.9266027256244017, 0.8246111062199752], ["Contrast", 0.12112023357797697, 0.33870762271759436]], [["Brightness", 0.8688784756993134, 0.17263759696106606], ["ShearX", 0.5133700431071326, 0.6686811994542494]], [["Invert", 0.8347840440941976, 
0.03774897445901726], ["Brightness", 0.24925057499276548, 0.04293631677355758]], [["Color", 0.5998145279485104, 0.4820093200092529], ["TranslateY", 0.6709586184077769, 0.07377334081382858]], [["AutoContrast", 0.7898846202957984, 0.325293526672498], ["Contrast", 0.5156435596826767, 0.2889223168660645]], [["ShearX", 0.08147389674998307, 0.7978924681113669], ["Contrast", 0.7270003309106291, 0.009571215234092656]], [["Sharpness", 0.417607614440786, 0.9532566433338661], ["Posterize", 0.7186586546796782, 0.6936509907073302]], [["ShearX", 0.9555300215926675, 0.1399385550263872], ["Color", 0.9981041061848231, 0.5037462398323248]], [["Equalize", 0.8003487831375474, 0.5413759363796945], ["ShearY", 0.0026607045117773565, 0.019262273030984933]], [["TranslateY", 0.04845391502469176, 0.10063445212118283], ["Cutout", 0.8273170186786745, 0.5045257728554577]], [["TranslateX", 0.9690985344978033, 0.505202991815533], ["TranslateY", 0.7255326592928096, 0.02103609500701631]], [["Solarize", 0.4030771176836736, 0.8424237871457034], ["Cutout", 0.28705805963928965, 0.9601617893682582]], [["Sharpness", 0.16865290353070606, 0.6899673563468826], ["Posterize", 0.3985430034869616, 0.6540651997730774]], [["ShearY", 0.21395578485362032, 0.09519358818949009], ["Solarize", 0.6692821708524135, 0.6462523623552485]], [["AutoContrast", 0.912360598054091, 0.029800239085051583], ["Invert", 0.04319256403746308, 0.7712501517098587]], [["ShearY", 0.9081969961839055, 0.4581560239984739], ["AutoContrast", 0.5313894814729159, 0.5508393335751848]], [["ShearY", 0.860528568424097, 0.8196987216301588], ["Posterize", 0.41134650331494205, 0.3686632018978778]], [["AutoContrast", 0.8753670810078598, 0.3679438326304749], ["Invert", 0.010444228965415858, 0.9581244779208277]], [["Equalize", 0.07071836206680682, 0.7173594756186462], ["Brightness", 0.06111434312497388, 0.16175064669049277]], [["AutoContrast", 0.10522219073562122, 0.9768776621069855], ["TranslateY", 0.2744795945215529, 0.8577967957127298]], 
[["AutoContrast", 0.7628146493166175, 0.996157376418147], ["Contrast", 0.9255565598518469, 0.6826126662976868]], [["TranslateX", 0.017225816199011312, 0.2470332491402908], ["Solarize", 0.44048494909493807, 0.4492422515972162]], [["ShearY", 0.38885252627795064, 0.10272256704901939], ["Equalize", 0.686154959829183, 0.8973517148655337]], [["Rotate", 0.29628991573592967, 0.16639926575004715], ["ShearX", 0.9013782324726413, 0.0838318162771563]], [["Color", 0.04968391374688563, 0.6138600739645352], ["Invert", 0.11177127838716283, 0.10650198522261578]], [["Invert", 0.49655016367624016, 0.8603374164829688], ["ShearY", 0.40625439617553727, 0.4516437918820778]], [["TranslateX", 0.15015718916062992, 0.13867777502116208], ["Brightness", 0.3374464418810188, 0.7613355669536931]], [["Invert", 0.644644393321966, 0.19005804481199562], ["AutoContrast", 0.2293259789431853, 0.30335723256340186]], [["Solarize", 0.004968793254801596, 0.5370892072646645], ["Contrast", 0.9136902637865596, 0.9510587477779084]], [["Rotate", 0.38991518440867123, 0.24796987467455756], ["Sharpness", 0.9911180315669776, 0.5265657122981591]], [["Solarize", 0.3919646484436238, 0.6814994037194909], ["Sharpness", 0.4920838987787103, 0.023425724294012018]], [["TranslateX", 0.25107587874378867, 0.5414936560189212], ["Cutout", 0.7932919623814599, 0.9891303444820169]], [["Brightness", 0.07863012174272999, 0.045175652208389594], ["Solarize", 0.889609658064552, 0.8228793315963948]], [["Cutout", 0.20477096178169596, 0.6535063675027364], ["ShearX", 0.9216318577173639, 0.2908690977359947]], [["Contrast", 0.7035118947423187, 0.45982709058312454], ["Contrast", 0.7130268070749464, 0.8635123354235471]], [["Sharpness", 0.26319477541228997, 0.7451278726847078], ["Rotate", 0.8170499362173754, 0.13998593411788207]], [["Rotate", 0.8699365715164192, 0.8878057721750832], ["Equalize", 0.06682350555715044, 0.7164702080630689]], [["ShearY", 0.3137466057521987, 0.6747433496011368], ["Rotate", 0.42118828936218133, 0.980121180104441]], 
[["Solarize", 0.8470375049950615, 0.15287589264139223], ["Cutout", 0.14438435054693055, 0.24296463267973512]], [["TranslateY", 0.08822241792224905, 0.36163911974799356], ["TranslateY", 0.11729726813270003, 0.6230889726445291]], [["ShearX", 0.7720112337718541, 0.2773292905760122], ["Sharpness", 0.756290929398613, 0.27830353710507705]], [["Color", 0.33825031007968287, 0.4657590047522816], ["ShearY", 0.3566628994713067, 0.859750504071925]], [["TranslateY", 0.06830147433378053, 0.9348778582086664], ["TranslateX", 0.15509346516378553, 0.26320778885339435]], [["Posterize", 0.20266751150740858, 0.008351463842578233], ["Sharpness", 0.06506971109417259, 0.7294471760284555]], [["TranslateY", 0.6278911394418829, 0.8702181892620695], ["Invert", 0.9367073860264247, 0.9219230428944211]], [["Sharpness", 0.1553425337673321, 0.17601557714491345], ["Solarize", 0.7040449681338888, 0.08764313147327729]], [["Equalize", 0.6082233904624664, 0.4177428549911376], ["AutoContrast", 0.04987405274618151, 0.34516208204700916]], [["Brightness", 0.9616085936167699, 0.14561237331885468], ["Solarize", 0.8927707736296572, 0.31176907850205704]], [["Brightness", 0.6707778304730988, 0.9046457117525516], ["Brightness", 0.6801448953060988, 0.20015313057149042]], [["Color", 0.8292680845499386, 0.5181603879593888], ["Brightness", 0.08549161770369762, 0.6567870536463203]], [["ShearY", 0.267802208078051, 0.8388133819588173], ["Sharpness", 0.13453409120796123, 0.10028351311149486]], [["Posterize", 0.775796593610272, 0.05359034561289766], ["Cutout", 0.5067360625733027, 0.054451986840317934]], [["TranslateX", 0.5845238647690084, 0.7507147553486293], ["Brightness", 0.2642051786121197, 0.2578358927056452]], [["Cutout", 0.10787517610922692, 0.8147986902794228], ["Contrast", 0.2190149206329539, 0.902210615462459]], [["TranslateX", 0.5663614214181296, 0.05309965916414028], ["ShearX", 0.9682797885154938, 0.41791929533938466]], [["ShearX", 0.2345325577621098, 0.383780128037189], ["TranslateX", 0.7298083748149163, 
0.644325797667087]], [["Posterize", 0.5138725709682734, 0.7901809917259563], ["AutoContrast", 0.7966018627776853, 0.14529337543427345]], [["Invert", 0.5973031989249785, 0.417399314592829], ["Solarize", 0.9147539948653116, 0.8221272315548086]], [["Posterize", 0.601596043336383, 0.18969646160963938], ["Color", 0.7527275484079655, 0.431793831326888]], [["Equalize", 0.6731483454430538, 0.7866786558207602], ["TranslateX", 0.97574396899191, 0.5970255778044692]], [["Cutout", 0.15919495850169718, 0.8916094305850562], ["Invert", 0.8351348834751027, 0.4029937360314928]], [["Invert", 0.5894085405226027, 0.7283806854157764], ["Brightness", 0.3973976860470554, 0.949681121498567]], [["AutoContrast", 0.3707914135327408, 0.21192068592079616], ["ShearX", 0.28040127351140676, 0.6754553511344856]], [["Solarize", 0.07955132378694896, 0.15073572961927306], ["ShearY", 0.5735850168851625, 0.27147326850217746]], [["Equalize", 0.678653949549764, 0.8097796067861455], ["Contrast", 0.2283048527510083, 0.15507804874474185]], [["Equalize", 0.286013868374536, 0.186785848694501], ["Posterize", 0.16319021740810458, 0.1201304443285659]], [["Sharpness", 0.9601590830563757, 0.06267915026513238], ["AutoContrast", 0.3813920685124327, 0.294224403296912]], [["Brightness", 0.2703246632402241, 0.9168405377492277], ["ShearX", 0.6156009855831097, 0.4955986055846403]], [["Color", 0.9065504424987322, 0.03393612216080133], ["ShearY", 0.6768595880405884, 0.9981068127818191]], [["Equalize", 0.28812842368483904, 0.300387487349145], ["ShearY", 0.28812248704858345, 0.27105076231533964]], [["Brightness", 0.6864882730513477, 0.8205553299102412], ["Cutout", 0.45995236371265424, 0.5422030370297759]], [["Color", 0.34941404877084326, 0.25857961830158516], ["AutoContrast", 0.3451390878441899, 0.5000938249040454]], [["Invert", 0.8268247541815854, 0.6691380821226468], ["Cutout", 0.46489193601530476, 0.22620873109485895]], [["Rotate", 0.17879730528062376, 0.22670425330593935], ["Sharpness", 0.8692795688221834, 
0.36586055020855723]], [["Brightness", 0.31203975139659634, 0.6934046293010939], ["Cutout", 0.31649437872271236, 0.08078625004157935]], [["Cutout", 0.3119482836150119, 0.6397160035509996], ["Contrast", 0.8311248624784223, 0.22897510169718616]], [["TranslateX", 0.7631157841429582, 0.6482890521284557], ["Brightness", 0.12681196272427664, 0.3669813784257344]], [["TranslateX", 0.06027722649179801, 0.3101104512201861], ["Sharpness", 0.5652076706249394, 0.05210008400968136]], [["AutoContrast", 0.39213552101583127, 0.5047021194355596], ["ShearY", 0.7164003055682187, 0.8063370761002899]], [["Solarize", 0.9574307011238342, 0.21472064809226854], ["AutoContrast", 0.8102612285047174, 0.716870148067014]], [["Rotate", 0.3592634277567387, 0.6452602893051465], ["AutoContrast", 0.27188430331411506, 0.06003099168464854]], [["Cutout", 0.9529536554825503, 0.5285505311027461], ["Solarize", 0.08478231903311029, 0.15986449762728216]], [["TranslateY", 0.31176130458018936, 0.5642853506158253], ["Equalize", 0.008890883901317648, 0.5146121040955942]], [["Color", 0.40773645085566157, 0.7110398926612682], ["Color", 0.18233100156439364, 0.7830036002758337]], [["Posterize", 0.5793809197821732, 0.043748553135581236], ["Invert", 0.4479962016131668, 0.7349663010359488]], [["TranslateX", 0.1994882312299382, 0.05216859488899439], ["Rotate", 0.48288726352035416, 0.44713829026777585]], [["Posterize", 0.22122838185154603, 0.5034546841241283], ["TranslateX", 0.2538745835410222, 0.6129055170893385]], [["Color", 0.6786559960640814, 0.4529749369803212], ["Equalize", 0.30215879674415336, 0.8733394611096772]], [["Contrast", 0.47316062430673456, 0.46669538897311447], ["Invert", 0.6514906551984854, 0.3053339444067804]], [["Equalize", 0.6443202625334524, 0.8689731394616441], ["Color", 0.7549183794057628, 0.8889001426329578]], [["Solarize", 0.616709740662654, 0.7792180816399313], ["ShearX", 0.9659155537406062, 0.39436937531179495]], [["Equalize", 0.23694011299406226, 0.027711152164392128], ["TranslateY", 
0.1677339686527083, 0.3482126536808231]], [["Solarize", 0.15234175951790285, 0.7893840414281341], ["TranslateX", 0.2396395768284183, 0.27727219214979715]], [["Contrast", 0.3792017455380605, 0.32323660409845334], ["Contrast", 0.1356037413846466, 0.9127772969992305]], [["ShearX", 0.02642732222284716, 0.9184662576502115], ["Equalize", 0.11504884472142995, 0.8957638893097964]], [["TranslateY", 0.3193812913345325, 0.8828100030493128], ["ShearY", 0.9374975727563528, 0.09909415611083694]], [["AutoContrast", 0.025840721736048122, 0.7941037581373024], ["TranslateY", 0.498518003323313, 0.5777122846572548]], [["ShearY", 0.6042199307830248, 0.44809668754508836], ["Cutout", 0.3243978207701482, 0.9379740926294765]], [["ShearY", 0.6858549297583574, 0.9993252035788924], ["Sharpness", 0.04682428732773203, 0.21698099707915652]], [["ShearY", 0.7737469436637263, 0.8810127181224531], ["ShearY", 0.8995655445246451, 0.4312416220354539]], [["TranslateY", 0.4953094136709374, 0.8144161580138571], ["Solarize", 0.26301211718928097, 0.518345311180405]], [["Brightness", 0.8820246486031275, 0.571075863786249], ["ShearX", 0.8586669146703955, 0.0060476383595142735]], [["Sharpness", 0.20519233710982254, 0.6144574759149729], ["Posterize", 0.07976625267460813, 0.7480145046726968]], [["ShearY", 0.374075419680195, 0.3386105402023202], ["ShearX", 0.8228083637082115, 0.5885174783155361]], [["Brightness", 0.3528780713814561, 0.6999884884306623], ["Sharpness", 0.3680348120526238, 0.16953358258959617]], [["Brightness", 0.24891223104442084, 0.7973853494920095], ["TranslateX", 0.004256803835524736, 0.0470216343108546]], [["Posterize", 0.1947344282646012, 0.7694802711054367], ["Cutout", 0.9594385534844785, 0.5469744140592429]], [["Invert", 0.19012504762806026, 0.7816140211434693], ["TranslateY", 0.17479746932338402, 0.024249345245078602]], [["Rotate", 0.9669262055946796, 0.510166180775991], ["TranslateX", 0.8990602034610352, 0.6657802719304693]], [["ShearY", 0.5453049050407278, 0.8476872739603525], ["Cutout", 
0.14226529093962592, 0.15756960661106634]], [["Equalize", 0.5895291156113004, 0.6797218994447763], ["TranslateY", 0.3541442192192753, 0.05166001155849864]], [["Equalize", 0.39530681662726097, 0.8448335365081087], ["Brightness", 0.6785483272734143, 0.8805568647038574]], [["Cutout", 0.28633258271917905, 0.7750870268336066], ["Equalize", 0.7221097824537182, 0.5865506280531162]], [["Posterize", 0.9044429629421187, 0.4620266401793388], ["Invert", 0.1803008045494473, 0.8073190766288534]], [["Sharpness", 0.7054649148075851, 0.3877207948962055], ["TranslateX", 0.49260224225927285, 0.8987462620731029]], [["Sharpness", 0.11196934729294483, 0.5953704422694938], ["Contrast", 0.13969334315069737, 0.19310569898434204]], [["Posterize", 0.5484346101051778, 0.7914140118600685], ["Brightness", 0.6428044691630473, 0.18811316670808076]], [["Invert", 0.22294834094984717, 0.05173157689962704], ["Cutout", 0.6091129168510456, 0.6280845506243643]], [["AutoContrast", 0.5726444076195267, 0.2799840903601295], ["Cutout", 0.3055752727786235, 0.591639807512993]], [["Brightness", 0.3707116723204462, 0.4049175910826627], ["Rotate", 0.4811601625588309, 0.2710760253723644]], [["ShearY", 0.627791719653608, 0.6877498291550205], ["TranslateX", 0.8751753308366824, 0.011164650018719358]], [["Posterize", 0.33832547954522263, 0.7087039872581657], ["Posterize", 0.6247474435007484, 0.7707784192114796]], [["Contrast", 0.17620186308493468, 0.9946224854942095], ["Solarize", 0.5431896088395964, 0.5867904203742308]], [["ShearX", 0.4667959516719652, 0.8938082224109446], ["TranslateY", 0.7311343008292865, 0.6829842246020277]], [["ShearX", 0.6130281467237769, 0.9924010909612302], ["Brightness", 0.41039241699696916, 0.9753218875311392]], [["TranslateY", 0.0747250386427123, 0.34602725521067534], ["Rotate", 0.5902597465515901, 0.361094672021087]], [["Invert", 0.05234890878959486, 0.36914978664919407], ["Sharpness", 0.42140532878231374, 0.19204058551048275]], [["ShearY", 0.11590485361909497, 0.6518540857972316], 
["Invert", 0.6482444740361704, 0.48256237896163945]], [["Rotate", 0.4931329446923608, 0.037076242417301675], ["Contrast", 0.9097939772412852, 0.5619594905306389]], [["Posterize", 0.7311032479626216, 0.4796364593912915], ["Color", 0.13912123993932402, 0.03997286439663705]], [["AutoContrast", 0.6196602944085344, 0.2531430457527588], ["Rotate", 0.5583937060431972, 0.9893379795224023]], [["AutoContrast", 0.8847753125072959, 0.19123028952580057], ["TranslateY", 0.494361716097206, 0.14232297727461696]], [["Invert", 0.6212360716340707, 0.033898871473033165], ["AutoContrast", 0.30839896957008295, 0.23603569542166247]], [["Equalize", 0.8255583546605049, 0.613736933157845], ["AutoContrast", 0.6357166629525485, 0.7894617347709095]], [["Brightness", 0.33840706322846814, 0.07917167871493658], ["ShearY", 0.15693175752528676, 0.6282773652129153]], [["Cutout", 0.7550520024859294, 0.08982367300605598], ["ShearX", 0.5844942417320858, 0.36051195083380105]]]
return p
def fa_reduced_svhn():
p = [[["TranslateX", 0.001576965129744562, 0.43180488809874773], ["Invert", 0.7395307279252639, 0.7538444307982558]], [["Contrast", 0.5762062225409211, 0.7532431872873473], ["TranslateX", 0.45212523461624615, 0.02451684483019846]], [["Contrast", 0.18962433143225088, 0.29481185671147325], ["Contrast", 0.9998112218299271, 0.813015355163255]], [["Posterize", 0.9633391295905683, 0.4136786222304747], ["TranslateY", 0.8011655496664203, 0.44102126789970797]], [["Color", 0.8231185187716968, 0.4171602946893402], ["TranslateX", 0.8684965619113907, 0.36514568324909674]], [["Color", 0.904075230324581, 0.46319140331093767], ["Contrast", 0.4115196534764559, 0.7773329158740563]], [["Sharpness", 0.6600262774093967, 0.8045637700026345], ["TranslateY", 0.5917663766021198, 0.6844241908520602]], [["AutoContrast", 0.16223989311434306, 0.48169653554195924], ["ShearX", 0.5433173232860344, 0.7460278151912152]], [["ShearX", 0.4913604762760715, 0.83391837859561], ["Color", 0.5580367056511908, 0.2961512691312932]], [["Color", 0.18567091721211237, 0.9296983204905286], ["Cutout", 0.6074026199060156, 0.03303273406448193]], [["Invert", 0.8049054771963224, 0.1340792344927909], ["Color", 0.4208839940504979, 0.7096454840962345]], [["ShearX", 0.7997786664546294, 0.6492629575700173], ["AutoContrast", 0.3142777134084793, 0.6526010594925064]], [["TranslateX", 0.2581027144644976, 0.6997433332894101], ["Rotate", 0.45490480973606834, 0.238620570022944]], [["Solarize", 0.837397161027719, 0.9311141273136286], ["Contrast", 0.640364826293148, 0.6299761518677469]], [["Brightness", 0.3782457347141744, 0.7085036717054278], ["Brightness", 0.5346150083208507, 0.5858930737867671]], [["Invert", 0.48780391510474086, 0.610086407879722], ["Color", 0.5601999247616932, 0.5393836220423195]], [["Brightness", 0.00250086643283564, 0.5003355864896979], ["Brightness", 0.003922153283353616, 0.41107110154584925]], [["TranslateX", 0.4073069009685957, 0.9843435292693372], ["Invert", 0.38837085318721926, 0.9298542033875989]], 
[["ShearY", 0.05479740443795811, 0.9113983424872698], ["AutoContrast", 0.2181108114232728, 0.713996037012164]], [["Brightness", 0.27747508429413903, 0.3217467607288693], ["ShearX", 0.02715239061946995, 0.5430731635396449]], [["Sharpness", 0.08994432959374538, 0.004706443546453831], ["Posterize", 0.10768206853226996, 0.39020299239900236]], [["Cutout", 0.37498679037853905, 0.20784809761469553], ["Color", 0.9825516352194511, 0.7654155662756019]], [["Color", 0.8899349124453552, 0.7797700766409008], ["Rotate", 0.1370222187174981, 0.2622119295138398]], [["Cutout", 0.7088223332663685, 0.7884456023190028], ["Solarize", 0.5362257505160836, 0.6426837537811545]], [["Invert", 0.15686225694987552, 0.5500563899117913], ["Rotate", 0.16315224193260078, 0.4246854030170752]], [["Rotate", 0.005266247922433631, 0.06612026206223394], ["Contrast", 0.06494357829209037, 0.2738420319474947]], [["Cutout", 0.30200619566806275, 0.06558008068236942], ["Rotate", 0.2168576483823022, 0.878645566986328]], [["Color", 0.6358930679444622, 0.613404714161498], ["Rotate", 0.08733206733004326, 0.4348276574435751]], [["Cutout", 0.8834634887239585, 0.0006853845293474659], ["Solarize", 0.38132051231951847, 0.42558752668491195]], [["ShearY", 0.08830136548479937, 0.5522438878371283], ["Brightness", 0.23816560427834074, 0.3033709051157141]], [["Solarize", 0.9015331490756151, 0.9108788708847556], ["Contrast", 0.2057898014670072, 0.03260096030427456]], [["Equalize", 0.9455978685121174, 0.14850077333434056], ["TranslateY", 0.6888705996522545, 0.5300565492007543]], [["Cutout", 0.16942673959343585, 0.7294197201361826], ["TranslateX", 0.41184830642301534, 0.7060207449376135]], [["Color", 0.30133344118702166, 0.24384417956342314], ["Sharpness", 0.4640904544421743, 0.32431840288061864]], [["Sharpness", 0.5195055033472676, 0.9386677467005835], ["Color", 0.9536519432978372, 0.9624043444556467]], [["Rotate", 0.8689597230556101, 0.23955490826730633], ["Contrast", 0.050071600927462656, 0.1309891556004179]], [["Cutout", 
0.5349421090878962, 0.08239510727779054], ["Rotate", 0.46064964710717216, 0.9037689320897339]], [["AutoContrast", 0.5625256909986802, 0.5358003783186498], ["Equalize", 0.09204330691163354, 0.4386906784850649]], [["ShearX", 0.0011061172864470226, 0.07150284682189278], ["AutoContrast", 0.6015956946553209, 0.4375362295530898]], [["ShearY", 0.25294276499800983, 0.7937560397859562], ["Brightness", 0.30834103299704474, 0.21960258701547009]], [["Posterize", 0.7423948904688074, 0.4598609935109695], ["Rotate", 0.5510348811675979, 0.26763724868985933]], [["TranslateY", 0.3208729319318745, 0.945513054853888], ["ShearX", 0.4916473963030882, 0.8743840560039451]], [["ShearY", 0.7557718687011286, 0.3125397104722828], ["Cutout", 0.5565359791865849, 0.5151359251135629]], [["AutoContrast", 0.16652786355571275, 0.1101575800958632], ["Rotate", 0.05108851703032641, 0.2612966401802814]], [["Brightness", 0.380296489835016, 0.0428162454174662], ["ShearX", 0.3911934083168285, 0.18933607362790178]], [["Color", 0.002476250465397678, 0.07795275305347571], ["Posterize", 0.08131841266654188, 0.14843363184306413]], [["Cutout", 0.36664558716104434, 0.20904484995063996], ["Cutout", 0.07986452057223141, 0.9287747671053432]], [["Color", 0.9296812469919231, 0.6634239915141935], ["Rotate", 0.07632463573240006, 0.408624029443747]], [["Cutout", 0.7594470171961278, 0.9834672124229463], ["Solarize", 0.4471371303745053, 0.5751101102286562]], [["Posterize", 0.051186719734032285, 0.5110941294710823], ["Sharpness", 0.040432522797391596, 0.42652298706992164]], [["Sharpness", 0.2645335264327221, 0.8844553189835457], ["Brightness", 0.7229600357932696, 0.16660749270785696]], [["Sharpness", 0.6296376086802589, 0.15564989758083458], ["Sharpness", 0.7913410481400365, 0.7022615408082826]], [["Cutout", 0.5517247347343883, 0.43794888517764674], ["ShearX", 0.6951051782530201, 0.6230992857867065]], [["ShearX", 0.9015708556331022, 0.6322135168527783], ["Contrast", 0.4285629283441831, 0.18158321019502988]], [["Brightness", 
0.9014292329524769, 0.3660463325457713], ["Invert", 0.6700729097206592, 0.16502732071917703]], [["AutoContrast", 0.6432764477303431, 0.9998909112400834], ["Invert", 0.8124063975545761, 0.8149683327882365]], [["Cutout", 0.6023944009428617, 0.9630976951918225], ["ShearX", 0.2734723568803071, 0.3080911542121765]], [["Sharpness", 0.048949115014412806, 0.44497866256845164], ["Brightness", 0.5611832867244329, 0.12994217480426257]], [["TranslateY", 0.4619112333002525, 0.47317728091588396], ["Solarize", 0.618638784910472, 0.9508297099190338]], [["Sharpness", 0.9656274391147018, 0.3402622993963962], ["Cutout", 0.8452511174508919, 0.3094717093312621]], [["ShearX", 0.04942201651478659, 0.6910568465705691], ["AutoContrast", 0.7155342517619936, 0.8565418847743523]], [["Brightness", 0.5222290590721783, 0.6462675303633422], ["Sharpness", 0.7756317511341633, 0.05010730683866704]], [["Contrast", 0.17098396012942796, 0.9128908626236187], ["TranslateY", 0.1523815376677518, 0.4269909829886339]], [["Cutout", 0.7679024720089866, 0.22229116396644455], ["Sharpness", 0.47714827844878843, 0.8242815864830401]], [["Brightness", 0.9321772357292445, 0.11339758604001371], ["Invert", 0.7021078495093375, 0.27507749184928154]], [["ShearY", 0.7069449324510433, 0.07262757954730437], ["Cutout", 0.6298690227159313, 0.8866813664859028]], [["ShearX", 0.8153137620199989, 0.8478194179953927], ["ShearX", 0.7519451353411938, 0.3914579556959725]], [["Cutout", 0.07152574469472753, 0.2629935229222503], ["TranslateX", 0.43728405510089485, 0.2610201002449789]], [["AutoContrast", 0.5824529633013098, 0.5619551536261955], ["Rotate", 0.45434137552116965, 0.7567169855140041]], [["TranslateY", 0.9338431187142137, 0.14230481341042783], ["Cutout", 0.744797723251028, 0.4346601666787713]], [["ShearX", 0.3197252560289169, 0.8770408070016171], ["Color", 0.7657013088540465, 0.2685586719812284]], [["ShearY", 0.6542181749801549, 0.8148188744344297], ["Sharpness", 0.5108985661436543, 0.9926016115463769]], [["ShearY", 
0.39218730620135694, 0.857769946478945], ["Color", 0.39588355914920886, 0.9910530523789284]], [["Invert", 0.4993610396803735, 0.08449723470758526], ["TranslateX", 0.46267456928508305, 0.46691125646493964]], [["Equalize", 0.8640576819821256, 0.3973808869887604], ["ShearY", 0.5491163877063172, 0.422429328786161]], [["Contrast", 0.6146206387722841, 0.8453559854684094], ["TranslateX", 0.7974333014574718, 0.47395476786951773]], [["Contrast", 0.6828704722015236, 0.6952755697785722], ["Brightness", 0.7903069452567497, 0.8350915035109574]], [["Rotate", 0.1211091761531299, 0.9667702562228727], ["Color", 0.47888534537103344, 0.8298620028065332]], [["Equalize", 0.20009722872711086, 0.21851235854853018], ["Invert", 0.4433641154198673, 0.41902203581091935]], [["AutoContrast", 0.6333190204577053, 0.23965630032835372], ["Color", 0.38651217030044804, 0.06447323778198723]], [["Brightness", 0.378274337541471, 0.5482593116308322], ["Cutout", 0.4856574442608347, 0.8889688535495244]], [["Rotate", 0.8201259323479384, 0.7404525573938633], ["Color", 0.28371236449364595, 0.7866003515933161]], [["Brightness", 0.10053196350009105, 0.18814037089411267], ["Sharpness", 0.5572102497672569, 0.04458217557977126]], [["AutoContrast", 0.6445330112376135, 0.48082049184921843], ["TranslateY", 0.378898917914949, 0.9338102625289362]], [["AutoContrast", 0.08482623401924708, 0.25199930695784384], ["Solarize", 0.5981823550521426, 0.19626357596662092]], [["Solarize", 0.4373030803918095, 0.22907881245285625], ["AutoContrast", 0.6383084635487905, 0.29517603235993883]], [["AutoContrast", 0.922112624726991, 0.29398098144910145], ["AutoContrast", 0.8550184811514672, 0.8030331582292343]], [["ShearX", 0.38761582800913896, 0.06304125015084923], ["Contrast", 0.3225758804984975, 0.7089696696094797]], [["TranslateY", 0.27499498563849206, 0.1917583097241206], ["Color", 0.5845853711746438, 0.5353520071667661]], [["ShearY", 0.530881951424285, 0.47961248148116453], ["ShearX", 0.04666387744533289, 0.275772822690165]], 
[["Solarize", 0.5727309318844802, 0.02889734544563341], ["AutoContrast", 0.638852434854615, 0.9819440776921611]], [["AutoContrast", 0.9766868312173507, 0.9651796447738792], ["AutoContrast", 0.3489760216898085, 0.3082182741354106]], [["Sharpness", 0.13693510871346704, 0.08297205456926067], ["Contrast", 0.3155812019005854, 0.031402991638917896]], [["TranslateY", 0.2664707540547008, 0.4838091910041236], ["ShearX", 0.5935665395229432, 0.7813088248538167]], [["ShearY", 0.7578577752251343, 0.5116014090216161], ["ShearX", 0.8332831240873545, 0.26781876290841017]], [["TranslateY", 0.473254381651761, 0.4203181582821155], ["ShearY", 0.732848696900726, 0.47895514793728433]], [["Solarize", 0.6922689176672292, 0.36403255869823725], ["AutoContrast", 0.910654040826914, 0.888651414068326]], [["ShearX", 0.37326536936166244, 0.47830923320699525], ["Equalize", 0.4724702976076929, 0.8176108279939023]], [["Contrast", 0.3839906424759326, 0.09109695563933692], ["Invert", 0.36305435543972325, 0.5701589223795499]], [["Invert", 0.5175591137387999, 0.38815675919253867], ["TranslateY", 0.1354848160153554, 0.41734106283245065]], [["Color", 0.829616006981199, 0.18631472346156963], ["Color", 0.2465115448326214, 0.9439365672808333]], [["Contrast", 0.18207939197942158, 0.39841173152850873], ["ShearX", 0.16723588254695632, 0.2868649619006758]], [["Posterize", 0.1941909136988733, 0.6322499882557473], ["Contrast", 0.6109060391509794, 0.27329598688783296]], [["AutoContrast", 0.9148775146158022, 0.09129288311923844], ["Sharpness", 0.4222442287436423, 0.847961820057229]], [["Color", 0.21084007475489852, 0.008218056412554131], ["Contrast", 0.43996934555301637, 0.500680146508504]], [["ShearY", 0.6745287915240038, 0.6120305524405164], ["Equalize", 0.467403794543269, 0.2207148995882467]], [["Color", 0.7712823974371379, 0.2839161885566902], ["Color", 0.8725368489709752, 0.3349470222415115]], [["Solarize", 0.5563976601161562, 0.540446614847802], ["Invert", 0.14228071175107454, 0.2242332811481905]], 
[["Contrast", 0.34596757983998383, 0.9158971503395041], ["Cutout", 0.6823724203724072, 0.5221518922863516]], [["Posterize", 0.3275475232882672, 0.6520033254468702], ["Color", 0.7434224109271398, 0.0824308188060544]], [["Cutout", 0.7295122229650082, 0.277887573018184], ["Brightness", 0.5303655506515258, 0.28628046739964497]], [["Color", 0.8533293996815943, 0.24909788223027743], ["Color", 0.6915962825167857, 0.33592561040195834]], [["TranslateX", 0.0761441550001345, 0.7043906245420134], ["Equalize", 0.670845297717783, 0.30986063097084215]], [["Contrast", 0.30592723366237995, 0.7365013059287382], ["Color", 0.6173835128817455, 0.6417028717640598]], [["Rotate", 0.05558240682703821, 0.7284722849011761], ["Color", 0.7814801133853666, 0.13335113981884217]], [["ShearY", 0.6521743070190724, 0.6272195913574455], ["Rotate", 0.36278432239870423, 0.2335623679787695]], [["Color", 0.6799351102482663, 0.3850250771244986], ["Brightness", 0.613901077818094, 0.2374900558949702]], [["Color", 0.551451255148252, 0.7284757153447965], ["Solarize", 0.4863815212982878, 0.3857941567681324]], [["Contrast", 0.32516343965159267, 0.689921852601276], ["Cutout", 0.5922142001124506, 0.7709605594115009]], [["Brightness", 0.23760063764495856, 0.6392077018854179], ["Brightness", 0.7288124083714078, 0.4487520490201095]], [["Sharpness", 0.5631112298553713, 0.6803534985114782], ["ShearX", 0.6743791169050775, 0.34039227245151127]], [["AutoContrast", 0.8260911840078349, 0.7705607269534767], ["Rotate", 0.8880749478363638, 0.8182460047684648]], [["ShearY", 0.7037620764408412, 0.5219573160970589], ["Posterize", 0.7186150466761102, 0.6187857686944253]], [["TranslateY", 0.2140494926702246, 0.9104233882669488], ["TranslateX", 0.4096039512896902, 0.9692703030784571]], [["Equalize", 0.5404313549028165, 0.04094078980738014], ["AutoContrast", 0.07870278300673744, 0.841020779977939]], [["ShearY", 0.2684638876128488, 0.5599793678740521], ["Cutout", 0.19537995362704022, 0.2400995206366768]], [["AutoContrast", 
0.19366394417090382, 0.4130755503251951], ["Sharpness", 0.11735660606190662, 0.39276612830651914]], [["Cutout", 0.8313266945081518, 0.37171822186374703], ["Contrast", 0.5088549187459019, 0.2956405118511817]], [["Cutout", 0.28375485371479847, 0.37020183949342683], ["Posterize", 0.718761436947423, 0.2278804627251678]], [["ShearY", 0.6625840735667625, 0.5045065697748213], ["Rotate", 0.5175257698523389, 0.39496923901188824]], [["Color", 0.6498154010188212, 0.38674158604408604], ["Brightness", 0.8157804892728057, 0.05660118670560971]], [["Color", 0.5512855420254102, 0.7812054820692542], ["Solarize", 0.8851292984174468, 0.2808951606943277]], [["Contrast", 0.35258433539074363, 0.8085377169629859], ["Cutout", 0.5197965849563265, 0.8657111726930974]], [["Cutout", 0.23650925054419358, 0.746860862983295], ["Brightness", 0.8842190203336139, 0.4389347348156118]], [["Rotate", 0.8651460526861932, 0.0031372441327392753], ["Equalize", 0.3909498933963822, 0.6221687914603954]], [["TranslateX", 0.5793690303540427, 0.37939687327382987], ["Invert", 0.846172545690258, 0.36950442052945853]], [["Invert", 0.5151721602607067, 0.5860134277259832], ["Contrast", 0.6868708526377458, 0.2188104093363727]], [["Contrast", 0.28019632529718025, 0.8403553410328943], ["Cutout", 0.5238340355491738, 0.6948434115725599]], [["Rotate", 0.1592592617684533, 0.5212044951482974], ["Color", 0.42404215473874546, 0.45894052919059103]], [["AutoContrast", 0.21780978427851283, 0.11813011387113281], ["Contrast", 0.14557770349869537, 0.5468616480449002]], [["Cutout", 0.03573873600256905, 0.8747186430368771], ["AutoContrast", 0.4804465018567564, 0.3968185812087325]], [["ShearY", 0.027192162947493492, 0.35923750027515866], ["Sharpness", 0.03207302705814674, 0.25868625346023777]], [["AutoContrast", 0.9111793886013045, 0.33534571661592005], ["ShearY", 0.31365410004768934, 0.37055495208177025]], [["Color", 0.5119732811716222, 0.10635303813092001], ["Solarize", 0.9828759703639677, 0.33302532900783466]], [["Contrast", 
0.9652840964645487, 0.9550826002089741], ["ShearY", 0.16934262075572262, 0.35893022906919625]], [["Invert", 0.21526903298837538, 0.5491812432380025], ["TranslateX", 0.27691575128765095, 0.9916365493500338]], [["AutoContrast", 0.7223428288831728, 0.3001506080569529], ["Posterize", 0.28280773693692957, 0.5630226986948541]], [["TranslateY", 0.5334698670580152, 0.4329627064903895], ["Solarize", 0.11621274404555687, 0.38564564358937725]], [["Brightness", 0.9001900081991266, 0.15453762529292236], ["Equalize", 0.6749827304986464, 0.2174408558291521]], [["TranslateY", 0.703293071780793, 0.20371204513522137], ["Invert", 0.7921926919880306, 0.2647654009616249]], [["AutoContrast", 0.32650519442680254, 0.5567514700913352], ["ShearY", 0.7627653627354407, 0.5363510886152073]], [["Rotate", 0.364293676091047, 0.4262321334071656], ["Posterize", 0.7284189361001443, 0.6052618047275847]], [["Contrast", 0.004679138490284229, 0.6985327823420937], ["Posterize", 0.25412559986607497, 0.969098825421215]], [["ShearY", 0.6831738973100172, 0.6916463366962687], ["TranslateY", 0.8744153159733203, 0.3667879549647143]], [["Posterize", 0.39138456188265913, 0.8617909225610128], ["TranslateX", 0.5198303654364824, 0.5518823068009463]], [["Invert", 0.6471155996761706, 0.4793957129423701], ["ShearX", 0.8046274258703997, 0.9711394307595065]], [["Solarize", 0.2442520851809611, 0.5518114414771629], ["Sharpness", 0.02324109511463257, 0.18216585433541427]], [["Cutout", 0.7004457278387007, 0.4904439660213413], ["Contrast", 0.6516622044646659, 0.7324290164242575]], [["Brightness", 0.594212018801632, 0.5624822682300464], ["ShearX", 0.47929863548325596, 0.5610640338380719]], [["TranslateX", 0.20863492063218445, 0.23761872077836552], ["Color", 0.9374148559524687, 0.06390809573246009]], [["AutoContrast", 0.5548946725094693, 0.40547561665765874], ["Equalize", 0.26341425401933344, 0.2763692089379619]], [["Invert", 0.8224614398122034, 0.15547159819315676], ["Rotate", 0.4915912924663281, 0.6995695827608112]], 
[["Equalize", 0.05752620481520809, 0.80230125774557], ["Rotate", 0.16338857010673558, 0.8066738989167762]], [["ShearY", 0.5437502855505825, 0.252101665309144], ["Contrast", 0.9268450172095902, 0.13437399256747992]], [["TranslateY", 0.6946438457089812, 0.35376889837139813], ["Sharpness", 0.15438234648960253, 0.2668696344562673]], [["Invert", 0.24506516252953542, 0.1939315433476327], ["Sharpness", 0.8921986990130818, 0.21478051316241717]], [["TranslateY", 0.5292829065905086, 0.6896826369723732], ["Invert", 0.4461047865540309, 0.9854416526561315]], [["Posterize", 0.8085062334285464, 0.4538963572040656], ["Brightness", 0.2623572045603854, 0.16723779221170698]], [["Solarize", 0.1618752496191097, 0.6007634864056693], ["TranslateY", 0.07808851801433346, 0.3951252736249746]], [["TranslateX", 0.35426056783145843, 0.8875451782909476], ["Brightness", 0.5537927990151869, 0.3042790536918476]], [["Cutout", 0.9051584028783342, 0.6050507821593669], ["ShearX", 0.31185875057627255, 0.39145181108334876]], [["Brightness", 0.43157388465566776, 0.45511767545129933], ["ShearY", 0.626464342187273, 0.5251031991594401]], [["Contrast", 0.7978520212540166, 0.45088491126800995], ["ShearY", 0.20415027867560143, 0.24369493783350643]], [["ShearX", 0.48152242363853065, 0.001652619381325604], ["Sharpness", 0.6154899720956758, 0.22465778944283568]], [["Posterize", 0.0008092255557418104, 0.8624848793450179], ["Solarize", 0.7580784903978838, 0.4141187863855049]], [["TranslateY", 0.4829597846471378, 0.6077028815706373], ["ShearX", 0.43316420981872894, 0.007119694447608018]], [["Equalize", 0.2914045973615852, 0.6298874433109889], ["Cutout", 0.18663096101056076, 0.20634383363149222]], [["TranslateX", 0.6909947340830737, 0.40843889682671003], ["ShearX", 0.3693105697811625, 0.070573833710386]], [["Rotate", 0.6184027722396339, 0.6483359499288176], ["AutoContrast", 0.8658233903089285, 0.31462524418660626]], [["Brightness", 0.8165837262133947, 0.38138221738335765], ["Contrast", 0.01566790570443702, 
0.1250581265407818]], [["Equalize", 0.16745169701901802, 0.9239433721204139], ["ShearY", 0.5535908803004554, 0.35879199699526654]], [["Color", 0.9675880875486578, 0.19745998576077994], ["Posterize", 0.641736196661405, 0.5702363593336868]], [["ShearY", 0.27730895136251943, 0.4730273890919014], ["Posterize", 0.35829530316120517, 0.9040968539551122]], [["Cutout", 0.9989158254302966, 0.3210048366589035], ["Equalize", 0.9226385492886618, 0.21132010337062]], [["Posterize", 0.32861829410989934, 0.7608163668499222], ["TranslateY", 0.528381246453454, 0.6837459631017135]], [["ShearY", 0.6786278797045173, 0.49006792710382946], ["ShearX", 0.7860409944610941, 0.7960317025665418]], [["Solarize", 0.4420731874598513, 0.7163961196254427], ["Sharpness", 0.11927615232343353, 0.3649599343067734]], [["Cutout", 0.4606157449857542, 0.4682141505042986], ["Contrast", 0.8955528913735222, 0.8468556570983498]], [["Brightness", 0.5742349576881501, 0.5633914487991978], ["ShearX", 0.8288987143597276, 0.5937556836469728]], [["Posterize", 0.05362153577922808, 0.40072961361335696], ["Rotate", 0.6681795049585278, 0.5348470042353504]], [["TranslateY", 0.6190833866612555, 0.7338431624993972], ["Color", 0.5352400737236565, 0.1598194251940268]], [["Brightness", 0.9942846465176832, 0.11918348505217388], ["Brightness", 0.0659098729688602, 0.6558077481794591]], [["Equalize", 0.34089122700685126, 0.048940774058585546], ["ShearX", 0.5472987107071652, 0.2965222509150173]], [["Sharpness", 0.3660728361470086, 0.37607120931207433], ["Sharpness", 0.9974987257291261, 0.2483317486035219]], [["Posterize", 0.931283270966942, 0.7525022430475327], ["Cutout", 0.6299208568533524, 0.3313382622423058]], [["Invert", 0.5074998650080915, 0.9722820836624784], ["Solarize", 0.13997049847474802, 0.19340041815763026]], [["AutoContrast", 0.6804950477263457, 0.31675149536227815], ["Solarize", 0.800632422196852, 0.09054278636377117]], [["TranslateY", 0.6886579465517867, 0.549118383513461], ["Brightness", 0.7298771973550124, 
0.59421647759784]], [["Equalize", 0.8117050130827859, 0.22494316766261946], ["AutoContrast", 0.5217061631918504, 0.6106946809838144]], [["Equalize", 0.4734718117645248, 0.7746036952254298], ["Posterize", 0.032049205574512685, 0.9681402692267316]], [["Brightness", 0.4724177066851541, 0.7969700024018729], ["Solarize", 0.6930049134926459, 0.3880086567038069]], [["TranslateX", 0.2833979092130342, 0.6873833799104118], ["Rotate", 0.37167767436617366, 0.03249352593350204]], [["Posterize", 0.7080588381354884, 0.03014586990329654], ["Posterize", 0.20883930954891392, 0.1328596635826556]], [["Cutout", 0.1992050307454733, 0.8079881690617468], ["ShearY", 0.3057279570820446, 0.34868823290010564]], [["TranslateY", 0.6204358851346782, 0.24978856155434062], ["ShearX", 0.2403059671388028, 0.6706906799258086]], [["Contrast", 0.5527380063918701, 0.27504242043334765], ["Rotate", 0.37361791978638376, 0.17818567121454373]], [["Cutout", 0.3368229687890997, 0.013512329226772313], ["Contrast", 0.18480406673028238, 0.21653280083721013]], [["AutoContrast", 0.13634047961070397, 0.5322441057075571], ["Posterize", 0.3409948654529233, 0.2562132228604077]], [["Invert", 0.3375636037272626, 0.5417577242453775], ["Sharpness", 0.10271458969925179, 0.5125859420868099]], [["Invert", 0.26465503753231256, 0.7386494688407392], ["AutoContrast", 0.5310106090963371, 0.14699248759273964]], [["Sharpness", 0.8494538270706318, 0.9524607358113082], ["Solarize", 0.21142978953773187, 0.10711867917080763]], [["Equalize", 0.5185117903942263, 0.06342404369282638], ["ShearY", 0.26812877371366156, 0.32386585917978056]], [["TranslateY", 0.42724471339053904, 0.5218262942425845], ["Brightness", 0.7618037699290332, 0.5773256674209075]], [["Solarize", 0.5683461491921462, 0.7988018975591509], ["AutoContrast", 0.21826664523938988, 0.4395073407383595]], [["Posterize", 0.2564295537162734, 0.6778150727248975], ["Equalize", 0.7571361164411801, 0.4281744623444925]], [["Invert", 0.5171620125994946, 0.8719074953677988], ["ShearX", 
0.10216776728552601, 0.20888013515457593]], [["Equalize", 0.934033636879294, 0.7724470445507672], ["ShearX", 0.14671590364536757, 0.06500753170863127]], [["Cutout", 0.48433709681747783, 0.8989915985203363], ["ShearY", 0.5161346572684965, 0.3154078452465332]], [["AutoContrast", 0.4337913490682531, 0.8651407398083308], ["AutoContrast", 0.31402168607643444, 0.5001710653814162]], [["Brightness", 0.4805460794016203, 0.8182812769485313], ["Equalize", 0.6811585495672738, 0.25172380097389147]], [["TranslateX", 0.05384872718386273, 0.7854623644701991], ["Color", 0.12583336502656287, 0.08656304042059215]], [["TranslateX", 0.3949348949001942, 0.0668909826131569], ["ShearX", 0.2895255694762277, 0.23998090792480392]], [["TranslateY", 0.3183346601371876, 0.5869865305603826], ["Cutout", 0.38601500458347904, 0.37785641359408184]], [["Sharpness", 0.3676509660134142, 0.6370727445512337], ["Rotate", 0.17589815946040205, 0.912442427082365]], [["Equalize", 0.46427003979798154, 0.7771177715171392], ["Cutout", 0.6622980582423883, 0.47780927252115374]], [["TranslateX", 0.4535588156726688, 0.9548833090146791], ["ShearY", 0.18609208838268262, 0.034329918652624025]], [["Rotate", 0.4896172340987028, 0.4842683413051553], ["Brightness", 0.08416972178617699, 0.2946109607041465]], [["TranslateY", 0.1443363248914217, 0.7352253161146544], ["ShearX", 0.025210952382823004, 0.6249971039957651]], [["Brightness", 0.08771030702840285, 0.5926338109828604], ["Contrast", 0.629121304110493, 0.36114268164347396]], [["Cutout", 0.003318169533990778, 0.984234627407162], ["Color", 0.5656264894233379, 0.9913705503959709]], [["Cutout", 0.17582168928005226, 0.5163176285036686], ["Sharpness", 0.42976684239235224, 0.9936723374147685]], [["Rotate", 0.13343297511611085, 0.730719022391835], ["Cutout", 0.43419793455016154, 0.9802436121876401]], [["ShearX", 0.8761482122895571, 0.11688364945899332], ["Solarize", 0.6071032746712549, 0.9972373138154098]], [["Contrast", 0.2721995133325574, 0.9467839388553563], ["AutoContrast", 
0.357368427575824, 0.6530359095247653]], [["Equalize", 0.5334298945812708, 0.7157629957411794], ["Brightness", 0.8885107405370157, 0.2909013041171791]], [["Equalize", 0.4907081744271751, 0.9999203497290372], ["ShearX", 0.0055186544890628575, 0.20501406304441697]], [["Color", 0.4865852751351166, 0.14717278223914915], ["TranslateX", 0.0492335566831905, 0.01654291587484527]], [["Contrast", 0.3753662301521211, 0.866484274102244], ["Color", 0.21148416029328898, 0.37861792266657684]], [["TranslateY", 0.03960047686663052, 0.9948086048192006], ["TranslateX", 0.5802633545422445, 0.7696464344779717]], [["Contrast", 0.6456791961464718, 0.6304663998505495], ["Sharpness", 0.594774521429873, 0.8024138008893688]], [["Equalize", 0.5326123709954759, 0.7361990154971826], ["Invert", 0.5337609996065145, 0.06826577456972233]], [["ShearY", 0.7177596430755101, 0.16672206074906565], ["Equalize", 0.1847132768987843, 0.16186121936769876]], [["ShearY", 0.037342495065949534, 0.7762322168034441], ["Rotate", 0.28731231550023495, 0.4605573565280328]], [["Contrast", 0.6815742688289678, 0.04073638022156048], ["Cutout", 0.20201133153964437, 0.048429819360450654]], [["Color", 0.5295323372448824, 0.8591352159356821], ["Posterize", 0.7743900815037675, 0.8308865010050488]], [["Solarize", 0.9325362059095493, 0.4070769736318192], ["Contrast", 0.09359008071252661, 0.2808191171337515]], [["Sharpness", 0.6413241263332543, 0.5493867784897841], ["Solarize", 0.021951790397463734, 0.1045868634597023]], [["Color", 0.006027943433085061, 0.698043169126901], ["TranslateX", 0.06672167045857719, 0.6096719632236709]], [["TranslateX", 0.42167004878865333, 0.8844171486107537], ["Color", 0.12383835252312375, 0.9559595374068695]], [["Posterize", 0.5382560989047361, 0.6014252438301297], ["Color", 0.26197040526014054, 0.3423981550778665]], [["Cutout", 0.33150268513579584, 0.40828564490879615], ["AutoContrast", 0.6907753092981255, 0.05779246756831708]], [["Equalize", 0.31608006376116865, 0.9958870759781376], ["TranslateY", 
0.15842255624921547, 0.5764254535539765]], [["Contrast", 0.19859706438565994, 0.12680764238281503], ["TranslateY", 0.4694115475285127, 0.45831161348904836]], [["TranslateX", 0.18768081492494126, 0.7718605539481094], ["Cutout", 0.2340834739291012, 0.3290460999084155]], [["Posterize", 0.17300123510877463, 0.5276823821218432], ["AutoContrast", 0.5861008799330297, 0.31557924295308126]], [["TranslateX", 0.36140745478517367, 0.4172762477431993], ["Sharpness", 0.6518477061748665, 0.9033991248207786]], [["AutoContrast", 0.1757278990984992, 0.9562490311064124], ["Invert", 0.43712652497757065, 0.26925880337078234]], [["TranslateX", 0.38113274849599377, 0.35742156735271613], ["TranslateY", 0.47708889990018216, 0.7975974044609476]], [["Brightness", 0.39538470887490523, 0.09692156164771923], ["Equalize", 0.876825166573471, 0.0979346217138612]], [["Solarize", 0.07679586061933875, 0.45996163577975313], ["Invert", 0.039726680682847904, 0.23574574397443826]], [["ShearX", 0.9739648414905278, 0.5217986621319772], ["TranslateY", 0.21653455086845896, 0.30415852174016683]], [["TranslateY", 0.26965366633030263, 0.4355259497820251], ["Sharpness", 0.6343493801543757, 0.9337027079656623]], [["Rotate", 0.42301232492240126, 0.07813015342326983], ["AutoContrast", 0.28524730310382906, 0.24127293503900557]], [["Color", 0.826300213905907, 0.008451115447607682], ["Equalize", 0.6770124607838715, 0.2889698349030014]], [["Cutout", 0.3461911530045792, 0.7481322146924341], ["Brightness", 0.1831459184570124, 0.5487074846857195]], [["Brightness", 0.8455429603962046, 0.4838335496721761], ["Cutout", 0.5778222397066808, 0.7789798279724414]], [["Brightness", 0.7859388330361665, 0.5907006126719181], ["Brightness", 0.5299842953874527, 0.008670514958094622]], [["Rotate", 0.9584331504536162, 0.7242692977964363], ["TranslateY", 0.46941406313257866, 0.748911298847083]], [["AutoContrast", 0.5878130357161462, 0.25218818797390996], ["Solarize", 0.815466142337258, 0.20231731395730107]], [["ShearX", 
0.15594838773787617, 0.9764784874102524], ["TranslateY", 0.5805369037495945, 0.1412009058745196]], [["Sharpness", 0.7936370935749524, 0.5142489498674206], ["Sharpness", 0.1544307510097193, 0.3678451501088748]], [["TranslateY", 0.29391437860633873, 0.3520843012638746], ["Brightness", 0.5885278199370352, 0.04915265122854349]], [["AutoContrast", 0.3329771519033218, 0.2459852352278583], ["Equalize", 0.8674782697650298, 0.2900192232303214]], [["Cutout", 0.58997726901359, 0.9910393463442352], ["Contrast", 0.09792234559792412, 0.23341828880112486]], [["Cutout", 0.4643317809492098, 0.3224299097542076], ["TranslateY", 0.7998033586490294, 0.27086436352896565]], [["AutoContrast", 0.13138317155414905, 0.3419742927322439], ["TranslateY", 0.05413070060788905, 0.5504283113763994]], [["Posterize", 0.3645493423712921, 0.10684861674653627], ["Color", 0.6343589365592908, 0.9712261380583729]], [["Color", 0.06539862123316142, 0.34370535435837324], ["Equalize", 0.8098077629435421, 0.1272416658849032]], [["Invert", 0.3600258964493429, 0.7455698641930473], ["Color", 0.4118102215241555, 0.4489347750419333]], [["Sharpness", 0.2230673636976691, 0.2240713255305713], ["AutoContrast", 0.5039292091174429, 0.033700713206763835]], [["ShearX", 0.10611028325684749, 0.4235430688519599], ["Brightness", 0.354597328722803, 0.6835155193055997]], [["ShearX", 0.101313662029975, 0.3048854771395032], ["ShearX", 0.39832929626318425, 0.5569152062399838]], [["ShearX", 0.46033087857932264, 0.5976525683159943], ["Color", 0.8117411866929898, 0.22950658046373415]], [["Cutout", 0.04125062306390376, 0.5021647863925347], ["TranslateY", 0.4949139091550513, 0.40234738545601595]], [["TranslateX", 0.9982425877241792, 0.3912268450702254], ["Cutout", 0.8094853705295444, 0.4628037417520003]], [["Contrast", 0.47154787535001147, 0.5116549800625204], ["Invert", 0.4929108509901112, 0.713690694626014]], [["ShearX", 0.3073913369156325, 0.5912409524756753], ["Equalize", 0.5603975982699875, 0.12046838435247365]], [["TranslateY", 
0.8622939212850868, 0.057802109037417344], ["Invert", 0.7577173459800602, 0.33727019024447835]], [["Cutout", 0.3646694663986778, 0.6285264075514656], ["Color", 0.5589259087346165, 0.6650676195317845]], [["Invert", 0.8563008117600374, 0.6216056385231019], ["AutoContrast", 0.7575002303510038, 0.6906934785154547]], [["ShearX", 0.4415411885102101, 0.301535484182858], ["TranslateY", 0.779716145113622, 0.5792057745092073]], [["Invert", 0.10736083594024397, 0.10640910911300788], ["Posterize", 0.5923391813408784, 0.5437447559328059]], [["Color", 0.4745215286268124, 0.08046291318852558], ["Rotate", 0.1642897827127771, 0.20754337935267492]], [["Invert", 0.3141086213412405, 0.5865422721808763], ["AutoContrast", 0.7551954144793225, 0.5588044000850431]], [["Equalize", 0.979500405577596, 0.6846916489547885], ["Rotate", 0.11257616752512875, 0.8137724117751907]], [["Equalize", 0.6315666801659133, 0.71548254701219], ["Cutout", 0.38805635642306224, 0.29282906744304604]], [["Posterize", 0.022485702859896456, 0.2794994040845844], ["Color", 0.4554990465860552, 0.5842888808848151]], [["Invert", 0.15787502346886398, 0.5137397924063724], ["TranslateY", 0.487638703473969, 0.6428121360825987]], [["Rotate", 0.20473927977443407, 0.6090899892067203], ["Contrast", 0.3794752343740154, 0.8056548374185936]], [["AutoContrast", 0.35889225269685354, 0.7311496777471619], ["Sharpness", 0.10152796686794396, 0.34768639850633193]], [["Rotate", 0.6298704242033275, 0.09649334401126405], ["Solarize", 0.24713244934163017, 0.4292117526982358]], [["Contrast", 0.9851015107131748, 0.30895068679118054], ["Sharpness", 0.7167845732283787, 0.36269175386392893]], [["Equalize", 0.49699932368219435, 0.21262924430159158], ["Contrast", 0.8497731498354579, 0.672321242252727]], [["ShearX", 0.18955591368056923, 0.47178691165954034], ["Sharpness", 0.17732805705271348, 0.5486957094984023]], [["ShearY", 0.5087926728214892, 0.8236809302978783], ["AutoContrast", 0.9661195881001936, 0.1309360428195535]], [["Rotate", 
0.7825835251082691, 0.8292427086033229], ["TranslateX", 0.2034110174253454, 0.4073091408820304]], [["Cutout", 0.33457316681888716, 0.480098511703719], ["Sharpness", 0.8686004956803908, 0.21719357589897192]], [["ShearX", 0.30750577846813, 0.6349236735519613], ["Color", 0.5096781256213182, 0.5367289796478476]], [["Rotate", 0.7881847986981432, 0.846966895144323], ["Posterize", 0.33955649631388407, 0.9484449471562024]], [["Posterize", 0.5154127791998345, 0.8765287012129974], ["Posterize", 0.09621562708431097, 0.42108077474553995]], [["ShearX", 0.5513772653411826, 0.27285892893658015], ["AutoContrast", 0.027608088485522986, 0.1738173285576814]], [["Equalize", 0.7950881609822011, 0.05938388811616446], ["ShearX", 0.7864733097562856, 0.5928584864954718]], [["Equalize", 0.03401947599579436, 0.4936643525799874], ["Solarize", 0.8445332527647407, 0.4695434980914176]], [["AutoContrast", 0.9656295942383031, 0.6330670076537706], ["Brightness", 0.303859679517296, 0.8882002295195086]], [["ShearY", 0.5242765280639856, 0.7977406809732712], ["Rotate", 0.24810823616083127, 0.41392557985700773]], [["Posterize", 0.6824268148168342, 0.21831492475831715], ["ShearY", 0.0008811906288737209, 0.1939566265644924]], [["ShearY", 0.8413370823124643, 0.7075999817793881], ["Brightness", 0.7942266192900009, 0.0384845738170444]], [["ShearY", 0.9003919463843213, 0.5068340457708402], ["AutoContrast", 0.9990937631537938, 0.35323621376481695]], [["Contrast", 0.3266913024108897, 0.5470774782762176], ["Contrast", 0.31235464476196995, 0.5723334696204473]], [["AutoContrast", 0.40137522654585955, 0.4274859892417776], ["Sharpness", 0.6173858127038773, 0.9629236289042568]], [["Sharpness", 0.3728210261025356, 0.7873518787942092], ["Solarize", 0.4319848902062112, 0.799524274852396]], [["Sharpness", 0.009379857090624758, 0.3143858944787348], ["ShearY", 0.20273037650420184, 0.3501104740582885]], [["Color", 0.1837135820716444, 0.5709648984713641], ["Solarize", 0.36312838060628455, 0.3753448575775562]], [["Cutout", 
0.3400431457353702, 0.6871688775988243], ["ShearX", 0.42524570507364123, 0.7108865889616602]], [["Sharpness", 0.30703348499729893, 0.885278643437672], ["Cutout", 0.04407034125935705, 0.6821013415071144]], [["Brightness", 0.7164362367177879, 0.3383891625406651], ["Posterize", 0.002136409392137939, 0.5744439712876557]], [["Rotate", 0.757566991428807, 0.41351586654059386], ["TranslateY", 0.6716670812367449, 0.45381701497377025]], [["Color", 0.29554345831738604, 0.5747484938203239], ["Brightness", 0.6495565535422139, 0.38353714282675055]], [["Color", 0.6552239827844064, 0.6396684879350223], ["Rotate", 0.4078437959841622, 0.8229364582618871]], [["ShearX", 0.3325165311431108, 0.99875651917317], ["Cutout", 0.060614087173980605, 0.8655206968462149]], [["ShearY", 0.8591223614020521, 0.47375809606391645], ["ShearY", 0.09964216351993155, 0.7076762087109618]], [["Color", 0.9353968383925787, 0.5171703648813921], ["Cutout", 0.7542267059402566, 0.4591488152776885]], [["ShearX", 0.6832456179177027, 0.6798505733549863], ["Color", 0.7408439718746301, 0.5061967673457707]], [["Equalize", 0.4451729339243929, 0.9242958562575693], ["Posterize", 0.2426742903818478, 0.7914731845374992]], [["Posterize", 0.6241497285503436, 0.6800650930438693], ["Rotate", 0.8212761169895445, 0.42470879405266637]], [["Sharpness", 0.35467334577635123, 0.4150922293649909], ["Color", 0.38988011871489925, 0.08762395748275534]], [["Invert", 0.20231176261188386, 0.34300045056881756], ["Color", 0.6311643386438919, 0.4311911861691113]], [["Contrast", 0.2892223327756343, 0.533349670629816], ["ShearY", 0.6483243327679983, 0.37584367848303185]], [["Contrast", 0.6516401043089397, 0.3801387361685983], ["Contrast", 0.38470661862567795, 0.994720698440467]], [["Contrast", 0.44558087160644655, 0.4234506152228727], ["AutoContrast", 0.30132391715441104, 0.7758068064149011]], [["ShearY", 0.8336612877669443, 0.6961881064757953], ["TranslateX", 0.111182606133131, 0.7138593872015647]], [["Brightness", 0.7252053408816349, 
0.6883715819669095], ["Cutout", 0.6664014893052573, 0.5118622737562747]], [["TranslateX", 0.04294623433241698, 0.4737274091618545], ["Solarize", 0.15848056715239178, 0.436678451116009]], [["ShearX", 0.41843604414439584, 0.5571669083243844], ["Solarize", 0.31754187268874345, 0.643294796216908]], [["Cutout", 0.308644829376876, 0.9455913104658791], ["Cutout", 0.04221174396591258, 0.8004389485099825]], [["Invert", 0.7644819805649288, 0.393641460630097], ["Posterize", 0.20832144467525543, 0.6449709932505365]], [["ShearY", 0.60954354330238, 0.45193814135157406], ["Rotate", 0.07564178568434804, 0.5700158941616946]], [["Color", 0.47993653910354905, 0.18770437256254732], ["Equalize", 0.16540989366253533, 0.3295832145751728]], [["Sharpness", 0.773656112445468, 0.899183686347773], ["AutoContrast", 0.6225833171499476, 0.8375805811436356]], [["Brightness", 0.3119630413126101, 0.21694186245727698], ["Cutout", 0.08263220622864997, 0.9910421137289533]], [["TranslateY", 0.5200200210314198, 0.44467464167817444], ["Cutout", 0.3466375681433383, 0.22385957813397142]], [["ShearY", 0.4445374219718209, 0.23917745675733915], ["Equalize", 0.32094329607540717, 0.6286388268054685]], [["Invert", 0.6194633221674505, 0.6219326801360905], ["Color", 0.43219405413154555, 0.5463431710956901]], [["ShearX", 0.5491808798436206, 0.4485147269153593], ["ShearX", 0.9624243432991532, 0.581319457926692]], [["Cutout", 0.8486066390061917, 0.48538785811340557], ["Cutout", 0.15945182827781573, 0.4114259503742423]], [["TranslateX", 0.9845485123667319, 0.7590166645874611], ["Solarize", 0.9920857955871512, 0.33259831689209834]], [["Brightness", 0.3985764491687188, 0.3516086190155328], ["Cutout", 0.13907765098725244, 0.42430309616193995]], [["Color", 0.35877942890428727, 0.363294622757879], ["Equalize", 0.4997709941984466, 0.34475754120666147]], [["Sharpness", 0.5234916035905941, 0.8988480410886609], ["AutoContrast", 0.793554237802939, 0.2575758806963965]], [["Brightness", 0.36998588693418133, 0.24144652775222428], 
["Cutout", 0.06610767765334377, 0.9979246311006975]], [["TranslateY", 0.6132425595571164, 0.43952345951359123], ["Cutout", 0.361849532200793, 0.8462247954545264]], [["Posterize", 0.36953849915949677, 0.3144747463577223], ["Equalize", 0.3258985378881982, 0.6314053736452068]], [["TranslateY", 0.35835648104981205, 0.08075066564380576], ["TranslateX", 0.5242389109555177, 0.11959330395816647]], [["ShearX", 0.32773751079554303, 0.9307864751586945], ["Sharpness", 0.006921805496030664, 0.8736511230672348]], [["TranslateY", 0.48202000226401526, 0.7058919195136056], ["ShearY", 0.6998308555145181, 0.21074360071080764]], [["AutoContrast", 0.7615852152325713, 0.24914859158079972], ["Cutout", 0.8270894478252626, 0.5804285538051077]], [["AutoContrast", 0.5391662421077847, 0.5233969710179517], ["Brightness", 0.04205906143049083, 0.382677139318253]], [["Brightness", 0.6904817357054526, 0.9116378156160974], ["Invert", 0.24305250280628815, 0.2384731852843838]], [["TranslateX", 0.2661235046256291, 0.9705982948874188], ["Sharpness", 0.35821873293899625, 0.0030835471296858444]], [["Posterize", 0.39029991982997647, 0.4286238191447004], ["TranslateX", 0.08954883207184736, 0.7263973533121859]], [["Cutout", 0.040284118298638344, 0.0388330236482832], ["Posterize", 0.7807814946471116, 0.5238352731112299]], [["ShearY", 0.43556653451802413, 0.6924037743225071], ["Contrast", 0.001081515338562919, 0.7340363920548519]], [["Sharpness", 0.6966467544442373, 0.10202517317137291], ["Color", 0.18836344735972566, 0.31736252662501935]], [["Contrast", 0.6460000689193517, 0.16242196500430484], ["AutoContrast", 0.6003831047484897, 0.8612141912778188]], [["Brightness", 0.9172874494072921, 0.292364504408795], ["Solarize", 0.344602582555059, 0.7054248176903991]], [["Brightness", 0.020940469451794064, 0.5051042440134866], ["Cutout", 0.569500058123745, 0.9091247933460598]], [["Invert", 0.7367715506799225, 0.636137024500329], ["TranslateY", 0.6186960283294023, 0.37626001619073624]], [["TranslateX", 
0.2863246154089121, 0.7454318730628517], ["ShearY", 0.6649909124084395, 0.37639265910774133]], [["Equalize", 0.34603376919062656, 0.9324026002997775], ["Sharpness", 0.8481669261233902, 0.14545759197862507]], [["Contrast", 0.6184370038862784, 0.8074198580702933], ["TranslateX", 0.07036135693949985, 0.46222686847401306]], [["Invert", 0.9304884364616345, 0.26298808050002387], ["Color", 0.8027813156985396, 0.7748486756116594]], [["Posterize", 0.2887993806199106, 0.9576118517235523], ["Contrast", 0.07498577510121784, 0.09131727137211232]], [["Contrast", 0.8110536569461197, 0.051038215841138386], ["Solarize", 0.8799018446258887, 0.25028365826721977]], [["Cutout", 0.006954733791187662, 0.030507696587206496], ["Brightness", 0.45329597160103124, 0.9623148451520953]], [["TranslateX", 0.7436227980344521, 0.45996857241163086], ["Solarize", 0.9682234479355196, 0.70777684485634]], [["Brightness", 0.2080557865889058, 0.025557286020371328], ["AutoContrast", 0.4786039197123853, 0.9271157120589375]], [["Solarize", 0.1822930503108656, 0.8448222682426465], ["ShearX", 0.6221001240196488, 0.207994745014715]], [["Color", 0.27879201870553094, 0.9112278219836276], ["Color", 0.7508664408516654, 0.14885798940641318]], [["ShearX", 0.5496326925552889, 0.7643918760952656], ["AutoContrast", 0.7887459433195374, 0.5993900500657054]], [["ShearY", 0.7182376017241904, 0.7470412126724141], ["Rotate", 0.7644845975844854, 0.38510752407409893]], [["Contrast", 0.7984591239416293, 0.054767400038152704], ["Posterize", 0.7324315466290486, 0.41749946919991243]], [["Contrast", 0.596887781894766, 0.14832691232456097], ["Contrast", 0.05140651977459313, 0.14459348285712803]], [["TranslateX", 0.32766681876233766, 0.5291103977440215], ["Color", 0.6039423443931029, 0.6280077043167083]], [["Invert", 0.5267106136816635, 0.9429838545064784], ["Sharpness", 0.9999053422304087, 0.24764251340211074]], [["Contrast", 0.495767451313242, 0.6744720418896594], ["Brightness", 0.2220993631062378, 0.023842431692152832]], 
[["Invert", 0.7609399278201697, 0.38010826932678554], ["Color", 0.8454251931688355, 0.5876680099851194]], [["Posterize", 0.24967505238473384, 0.3801835337368412], ["Contrast", 0.15106121477353399, 0.6785384814310887]], [["Invert", 0.49594153211743874, 0.32307787492774986], ["Contrast", 0.46822075688054793, 0.7106858486805577]], [["Sharpness", 0.7204076261101202, 0.5928585438185809], ["Rotate", 0.2922878012111486, 0.2742491027179961]], [["Solarize", 0.2866813728691532, 0.2856363754608978], ["TranslateY", 0.7817609208793659, 0.17156048740523572]], [["Cutout", 0.03345540659323987, 0.30068271036485605], ["ShearY", 0.2556603044234358, 0.32397855468866993]], [["TranslateY", 0.20032231858163152, 0.4577561841994639], ["Cutout", 0.8063563515601337, 0.9224365467344459]], [["TranslateY", 0.27130034613023113, 0.7446375583249849], ["ShearX", 0.8254766023480402, 0.4187078898038131]], [["ShearX", 0.2937536068210411, 0.3864492533047109], ["Contrast", 0.7069611463424469, 0.686695922492015]], [["TranslateX", 0.5869084659063555, 0.7866008068031776], ["Invert", 0.289041613918004, 0.5774431720429087]], [["Posterize", 0.6199250263408456, 0.36010044446077893], ["Color", 0.7216853388297056, 0.18586684958836489]], [["Posterize", 0.16831615585406814, 0.08052519983493259], ["Cutout", 0.7325882891023244, 0.77416439921321]], [["Posterize", 0.3000961100422498, 0.5181759282337892], ["Contrast", 0.40376073196794304, 0.613724714153924]], [["ShearX", 0.32203193464136226, 0.037459860897434916], ["Solarize", 0.961542785512965, 0.5176575408248285]], [["Posterize", 0.8986732529036036, 0.7773257927223327], ["AutoContrast", 0.9765986969928243, 0.2092264330225745]], [["Posterize", 0.7463386563644007, 0.7086671048242543], ["Posterize", 0.6433819807034994, 0.00541136425219968]], [["Contrast", 0.8810746688690078, 0.4821029611474963], ["Invert", 0.5121169325265204, 0.6360694878582249]], [["AutoContrast", 0.457606735372388, 0.6104794570624505], ["Color", 0.0020511991982608124, 0.6488142202778011]], [["Invert", 
0.01744463899367027, 0.9799156424364703], ["ShearY", 0.3448213456605478, 0.04437356383800711]], [["Solarize", 0.28511589596283315, 0.283465265528744], ["Rotate", 0.6831807199089897, 0.0617176467316177]], [["Sharpness", 0.329148970281285, 0.398397318402924], ["Color", 0.9125837011914073, 0.4724426676489746]], [["Posterize", 0.05701522811381192, 0.17109014518445975], ["Cutout", 0.785885656821686, 0.39072624694455804]], [["TranslateY", 0.36644251447248277, 0.5818480868136134], ["Equalize", 0.06162286852923926, 0.710929848709861]], [["ShearY", 0.8667124241442813, 0.7556246528256454], ["ShearY", 0.505190335528531, 0.2935701441277698]], [["Brightness", 0.6369570015916268, 0.5131486964430919], ["Color", 0.4887119711633827, 0.9364572089679907]], [["Equalize", 0.06596702627228657, 0.42632445412423303], ["Equalize", 0.583434672187985, 0.045592788478947655]], [["ShearY", 0.12701084021549092, 0.501622939075192], ["Cutout", 0.7948319202684251, 0.5662618207034569]], [["Posterize", 0.24586808377061664, 0.5178008194277262], ["Contrast", 0.1647040530405073, 0.7459410952796975]], [["Solarize", 0.346601298126444, 0.02933266448415553], ["ShearY", 0.9571781647031095, 0.4992610484566735]], [["Brightness", 0.5174960605130408, 0.4387498174634591], ["AutoContrast", 0.6327403754086753, 0.8279630556620247]], [["Posterize", 0.7591448754183128, 0.6265369743070788], ["Posterize", 0.5030300462943854, 0.00401699185532868]], [["Contrast", 0.02643254602183477, 0.44677741300429646], ["Invert", 0.2921779546234399, 0.732876182854368]], [["TranslateY", 0.3516821152310867, 0.7142224211142528], ["Brightness", 0.07382104862245475, 0.45368581543623165]], [["Invert", 0.21382474908836685, 0.8413922690356168], ["Invert", 0.4082563426777157, 0.17018243778787834]], [["Brightness", 0.9533955059573749, 0.8279651051553477], ["Cutout", 0.6730769221406385, 0.07780554260470988]], [["Brightness", 0.6022173063382547, 0.6008500678386571], ["Sharpness", 0.5051909719558138, 0.002298383273851839]], [["Contrast", 
0.03373395758348563, 0.3343918835437655], ["Sharpness", 0.8933651164916847, 0.21738300404986516]], [["TranslateX", 0.7095755408419822, 0.26445508146225394], ["Equalize", 0.18255527363432034, 0.38857557766574147]], [["Solarize", 0.4045911117686074, 0.009106925727519921], ["Posterize", 0.9380296936271705, 0.5485821516085955]], [["Posterize", 0.20361995432403968, 0.45378735898242406], ["AutoContrast", 0.9020357653982511, 0.7880592087609304]], [["AutoContrast", 0.9921550787672145, 0.7396130723399785], ["Cutout", 0.4203609896071977, 0.13000504717682415]], [["Equalize", 0.1917806394805356, 0.5549114911941102], ["Posterize", 0.27636900597148506, 0.02953514963949344]], [["AutoContrast", 0.5427071893197213, 0.6650127340685553], ["Color", 0.011762461060904839, 0.3793508738225649]], [["Invert", 0.18495006059896424, 0.8561476625981166], ["ShearY", 0.6417068692813954, 0.9908751019535517]], [["Solarize", 0.2992385431633619, 0.33622162977907644], ["Rotate", 0.6070550252540432, 0.010205544695142064]], [["Sharpness", 0.33292787606841845, 0.549446566149951], ["Color", 0.9097665730481233, 0.9947658451503181]], [["Posterize", 0.11207465085954937, 0.23296263754645155], ["Cutout", 0.6159972426858633, 0.38289684517298556]], [["TranslateX", 0.7343689718523805, 0.16303049089087485], ["Equalize", 0.3138385390145809, 0.6096356352129273]], [["Solarize", 0.4807269891506887, 0.28116279654856363], ["Posterize", 0.9753467973380021, 0.6327025372916857]], [["Posterize", 0.837244997106023, 0.5586046483574153], ["AutoContrast", 0.9005775602024721, 0.7983389828641411]], [["AutoContrast", 0.8347112949943837, 0.7321850307727004], ["Cutout", 0.3322676575657192, 0.14409873524237032]], [["Equalize", 0.12285967262649124, 0.5368519477089722], ["Posterize", 0.2693593445898034, 0.15098267759162076]], [["Invert", 0.331021587020619, 0.3140868578915853], ["Cutout", 0.48268387543799884, 0.7642598986625201]], [["Equalize", 0.47573794714622175, 0.8628185952549363], ["Solarize", 0.14860046214144496, 
0.3739284346347912]], [["AutoContrast", 0.6747373196190459, 0.2912917979635714], ["Posterize", 0.27259573208358623, 0.9643671211873469]], [["Sharpness", 0.15019788105901233, 0.7289238028242861], ["ShearY", 0.7998448015985137, 0.5924798900807636]], [["Brightness", 0.7874052186079156, 0.9446398428550358], ["Equalize", 0.5105557539139616, 0.6719808885741001]], [["ShearX", 0.783252331899515, 0.74960184771181], ["ShearX", 0.4327935527932927, 0.29980994764698565]], [["Rotate", 0.03892023906368644, 0.24868635699639904], ["Cutout", 0.6408903979315637, 0.32135851733523907]], [["Invert", 0.9972802027590713, 0.9374194642823106], ["ShearX", 0.20016463162924894, 0.0052278586143255645]], [["AutoContrast", 0.9328687102578992, 0.44280614999256235], ["Color", 0.05637751621265141, 0.26921974769786455]], [["AutoContrast", 0.2798532308065416, 0.5283914274806746], ["Cutout", 0.12930089032151, 0.25624459046884057]], [["Invert", 0.2397428994839993, 0.31011715409282065], ["Cutout", 0.5875151915473042, 0.7454458580264322]], [["Equalize", 0.374815667651982, 0.9502053862625081], ["Solarize", 0.10100323698574426, 0.5124939317648691]], [["AutoContrast", 0.6009889057852652, 0.3080148907275367], ["Posterize", 0.6543352447742621, 0.17498668744492413]], [["Sharpness", 0.14402909409016001, 0.9239239955843186], ["ShearY", 0.8959818090635513, 0.7258262803413784]], [["Brightness", 0.8672271320432974, 0.8241439816189235], ["Equalize", 0.4954433852960082, 0.6687050430971254]], [["Solarize", 0.47813402689782114, 0.9447222576804901], ["TranslateY", 0.32546974113401694, 0.8367777573080345]], [["Sharpness", 0.48098022972519927, 0.2731904819197933], ["Rotate", 0.14601550238940067, 0.3955290089346866]], [["AutoContrast", 0.3777442613874327, 0.9991495158709968], ["TranslateY", 0.2951496731751222, 0.6276755696126608]], [["Cutout", 0.487150344941835, 0.7976642551725155], ["Solarize", 0.643407733524025, 0.6313641977306543]], [["Rotate", 0.35017053741686033, 0.23960877779589906], ["Sharpness", 0.8741761196478873, 
0.12362019972427862]], [["Invert", 0.8849459784626776, 0.48532144354199647], ["Invert", 0.702430443380318, 0.924655906426149]], [["Equalize", 0.6324140359298986, 0.9780539325897597], ["AutoContrast", 0.39105074227907843, 0.3636856607173081]], [["AutoContrast", 0.8049993541952016, 0.3231157206314408], ["ShearY", 0.6675686366141409, 0.7345332792455934]], [["Sharpness", 0.12332351413693327, 0.9345179453120547], ["Solarize", 0.1594280186083361, 0.422049311332906]], [["Rotate", 0.38227253679386375, 0.7664364038099101], ["AutoContrast", 0.5725492572719726, 0.21049701651094446]], [["Brightness", 0.6432891832524184, 0.8243948738979008], ["Equalize", 0.20355899618080098, 0.7983877568044979]], [["ShearY", 0.694393675204811, 0.3686964692262895], ["TranslateX", 0.5593122846101599, 0.3378904046390629]], [["Invert", 0.9139730140623171, 0.7183505086140822], ["Posterize", 0.2675839177893596, 0.21399738931234905]], [["TranslateX", 0.05309461965184896, 0.032983777975422554], ["Sharpness", 0.412621944330688, 0.4752089612268503]], [["Equalize", 0.06901149860261116, 0.27405796188385945], ["AutoContrast", 0.7710451977604326, 0.20474249114426807]], [["ShearX", 0.47416427531072325, 0.2738614239087857], ["Cutout", 0.2820106413231565, 0.6295219975308107]], [["Cutout", 0.19984489885141582, 0.7019895950299546], ["ShearX", 0.4264722378410729, 0.8483962467724536]], [["ShearY", 0.42111446850243256, 0.1837626718066795], ["Brightness", 0.9187856196205942, 0.07478292286531767]], [["Solarize", 0.2832036589192868, 0.8253473638854684], ["Cutout", 0.7279303826662196, 0.615420010694839]], [["ShearX", 0.963251873356884, 0.5625577053738846], ["Color", 0.9637046840298858, 0.9992644813427337]], [["Invert", 0.7976502716811696, 0.43330238739921956], ["ShearY", 0.9113181667853614, 0.9066729024232627]], [["Posterize", 0.5750620807485399, 0.7729691927432935], ["Contrast", 0.4527879467651071, 0.9647739595774402]], [["Posterize", 0.5918751472569104, 0.26467375535556653], ["Posterize", 0.6347402742279589, 
0.7476940787143674]], [["Invert", 0.16552404612306285, 0.9829939598708993], ["Solarize", 0.29886553921638087, 0.22487098773064948]], [["Cutout", 0.24209211313246753, 0.5522928952260516], ["AutoContrast", 0.6212831649673523, 0.4191071063984261]], [["ShearX", 0.4726406722647257, 0.26783614257572447], ["TranslateY", 0.251078162624763, 0.26103450676044304]], [["Cutout", 0.8721775527314426, 0.6284108541347894], ["ShearX", 0.7063325779145683, 0.8467168866724094]], [["ShearY", 0.42226987564279606, 0.18012694533480308], ["Brightness", 0.858499853702629, 0.4738929353785444]], [["Solarize", 0.30039851082582764, 0.8151511479162529], ["Cutout", 0.7228873804059033, 0.6174351379837011]], [["ShearX", 0.4921198221896609, 0.5678998037958154], ["Color", 0.7865298825314806, 0.9309020966406338]], [["Invert", 0.8077821007916464, 0.7375015762124386], ["Cutout", 0.032464574567796195, 0.25405044477004846]], [["Color", 0.6061325441870133, 0.2813794250571565], ["TranslateY", 0.5882949270385848, 0.33262043078220227]], [["ShearX", 0.7877331864215293, 0.8001131937448647], ["Cutout", 0.19828215489868783, 0.5949317580743655]], [["Contrast", 0.529508728421701, 0.36477855845285007], ["Color", 0.7145481740509138, 0.2950794787786947]], [["Contrast", 0.9932891064746089, 0.46930062926732646], ["Posterize", 0.9033014136780437, 0.5745902253320527]]]
return p
class Augmentation(object):
    """Applies one randomly chosen sub-policy to an image.

    Each policy is a list of (op_name, probability, level) triples; every op
    in the chosen policy fires independently with its own probability.
    """

    def __init__(self, policies):
        # Pool of sub-policies to sample from on each call.
        self.policies = policies

    def __call__(self, img):
        chosen = random.choice(self.policies)
        for op_name, prob, magnitude in chosen:
            # Apply this op with probability `prob`; skip it otherwise.
            if random.random() <= prob:
                img = apply_augment(img, op_name, magnitude)
        return img
# Ready-to-use augmentation pipelines built from the reduced-search-space
# policy lists defined above (CIFAR-10 and SVHN variants).
cifar10_faa = Augmentation(fa_reduced_cifar10())
svhn_faa = Augmentation(fa_reduced_svhn())
| 117,783
| 392.926421
| 55,120
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/autoaugment.py
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transforms used in the Augmentation Policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
# pylint:disable=g-multiple-import
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
# pylint:enable=g-multiple-import
IMAGE_SIZE = 32
# Upper bound of the 'level' scale used by the transforms below.
PARAMETER_MAX = 30 # What is the max 'level' a transform could be predicted
def pil_wrap(img):
    """Convert `img` (a PIL Image) to RGBA mode."""
    rgba = img.convert('RGBA')
    return rgba
def pil_unwrap(img):
    """Convert the PIL `img` back to plain RGB mode."""
    rgb = img.convert('RGB')
    return rgb
def apply_policy(policy, img, use_fixed_posterize=False):
    """Apply the `policy` to the image `img`.

    Args:
      policy: List of (name, probability, level) tuples where `name` selects
        the augmentation operation, `probability` is the chance of applying
        it and `level` is the strength to apply it with.
      img: Image that will have `policy` applied to it.
      use_fixed_posterize: If True, look the ops up in the
        fixed-Posterize transform table — presumably the 8-bit-based
        Posterize variant; confirm against the table definitions.

    Returns:
      The result of applying `policy` to `img`, in RGB mode.
    """
    table = fixed_AA_NAME_TO_TRANSFORM if use_fixed_posterize else AA_NAME_TO_TRANSFORM
    result = pil_wrap(img)
    for xform in policy:
        assert len(xform) == 3
        name, probability, level = xform
        # Each transformer internally applies itself with `probability`.
        result = table[name].pil_transformer(probability, level)(result)
    return pil_unwrap(result)
def random_flip(x):
    """Mirror `x` left-right with 50% probability; otherwise return it as-is."""
    coin = np.random.rand(1)[0]
    return np.fliplr(x) if coin > 0.5 else x
def zero_pad_and_crop(img, amount=4):
    """Zero-pad `img` by `amount` pixels per side, then take a random crop.

    Args:
      img: Numpy image (H, W, C) that will be zero padded and cropped.
      amount: Number of zero pixels added on each side.

    Returns:
      A crop of the padded image with the same shape as `img`. Note the
      result is float64 because the zero canvas is allocated with np.zeros.
    """
    height, width = img.shape[0], img.shape[1]
    canvas = np.zeros((height + 2 * amount, width + 2 * amount, img.shape[2]))
    canvas[amount:amount + height, amount:amount + width, :] = img
    # Random crop offset inside the padded border.
    top = np.random.randint(low=0, high=2 * amount)
    left = np.random.randint(low=0, high=2 * amount)
    return canvas[top:top + height, left:left + width, :]
def float_parameter(level, maxval):
    """Linearly scale `maxval` by `level / PARAMETER_MAX`.

    Args:
      level: Operation level in [0, PARAMETER_MAX].
      maxval: Value reached when level == PARAMETER_MAX.

    Returns:
      A float equal to maxval * level / PARAMETER_MAX.
    """
    return maxval * float(level) / PARAMETER_MAX
def int_parameter(level, maxval):
    """Scale `maxval` by `level / PARAMETER_MAX` and truncate to an int.

    Args:
      level: Operation level in [0, PARAMETER_MAX].
      maxval: Value reached when level == PARAMETER_MAX.

    Returns:
      int(maxval * level / PARAMETER_MAX), truncated toward zero.
    """
    scaled = level * maxval / PARAMETER_MAX
    return int(scaled)
class TransformFunction(object):
    """Callable wrapper that carries a human-readable name for printing."""

    def __init__(self, func, name):
        self.f = func
        self.name = name

    def __repr__(self):
        return '<{}>'.format(self.name)

    def __call__(self, pil_img):
        return self.f(pil_img)
class TransformT(object):
    """A named image transform parameterised by a probability and a level."""

    def __init__(self, name, xform_fn):
        self.name = name
        self.xform = xform_fn

    def pil_transformer(self, probability, level):
        """Return a TransformFunction applying this op with `probability`."""
        def maybe_apply(im):
            if random.random() < probability:
                return self.xform(im, level)
            return im

        label = '{}({:.1f},{})'.format(self.name, probability, level)
        return TransformFunction(maybe_apply, label)

    def do_transform(self, image, level):
        """Apply the transform unconditionally (probability PARAMETER_MAX >= 1)."""
        f = self.pil_transformer(PARAMETER_MAX, level)
        return f(image)
################## Transform Functions ##################
# Level-independent transforms; the `level` argument is ignored by these.
identity = TransformT('identity', lambda pil_img, level: pil_img)
flip_lr = TransformT(
    'FlipLR',
    lambda pil_img, level: pil_img.transpose(Image.FLIP_LEFT_RIGHT))
flip_ud = TransformT(
    'FlipUD',
    lambda pil_img, level: pil_img.transpose(Image.FLIP_TOP_BOTTOM))
# pylint:disable=g-long-lambda
# ImageOps functions need an RGB image, so convert out of and back into RGBA.
auto_contrast = TransformT(
    'AutoContrast',
    lambda pil_img, level: ImageOps.autocontrast(
        pil_img.convert('RGB')).convert('RGBA'))
equalize = TransformT(
    'Equalize',
    lambda pil_img, level: ImageOps.equalize(
        pil_img.convert('RGB')).convert('RGBA'))
invert = TransformT(
    'Invert',
    lambda pil_img, level: ImageOps.invert(
        pil_img.convert('RGB')).convert('RGBA'))
# pylint:enable=g-long-lambda
# Fixed-kernel PIL filters, also level-independent.
blur = TransformT(
    'Blur', lambda pil_img, level: pil_img.filter(ImageFilter.BLUR))
smooth = TransformT(
    'Smooth',
    lambda pil_img, level: pil_img.filter(ImageFilter.SMOOTH))
def _rotate_impl(pil_img, level):
    """Rotate `pil_img` by up to 30 degrees, with a randomly chosen sign."""
    magnitude = int_parameter(level, 30)
    sign = -1 if random.random() > 0.5 else 1
    return pil_img.rotate(sign * magnitude)


rotate = TransformT('Rotate', _rotate_impl)
def _posterize_impl(pil_img, level):
    """Apply PIL Posterize, keeping `4 - level` bits per colour channel."""
    bits_removed = int_parameter(level, 4)
    rgb = pil_img.convert('RGB')
    return ImageOps.posterize(rgb, 4 - bits_removed).convert('RGBA')


posterize = TransformT('Posterize', _posterize_impl)
def _fixed_posterize_impl(pil_img, level):
    """Apply PIL Posterize, keeping `8 - level` bits per colour channel."""
    bits_removed = int_parameter(level, 4)
    rgb = pil_img.convert('RGB')
    return ImageOps.posterize(rgb, 8 - bits_removed).convert('RGBA')


fixed_posterize = TransformT('Posterize', _fixed_posterize_impl)
def _shear_x_impl(pil_img, level):
    """Shear `pil_img` along the horizontal axis, with a random sign.

    Args:
      pil_img: Image in PIL object.
      level: Strength of the operation specified as an Integer from
        [0, `PARAMETER_MAX`]; maps to a shear factor of up to 0.3.

    Returns:
      A PIL Image that has had ShearX applied to it.
    """
    shear = float_parameter(level, 0.3)
    if random.random() > 0.5:
        shear = -shear
    coeffs = (1, shear, 0, 0, 1, 0)
    return pil_img.transform((32, 32), Image.AFFINE, coeffs)


shear_x = TransformT('ShearX', _shear_x_impl)
def _shear_y_impl(pil_img, level):
    """Shear `pil_img` along the vertical axis, with a random sign.

    Args:
      pil_img: Image in PIL object.
      level: Strength of the operation specified as an Integer from
        [0, `PARAMETER_MAX`]; maps to a shear factor of up to 0.3.

    Returns:
      A PIL Image that has had ShearY applied to it.
    """
    shear = float_parameter(level, 0.3)
    if random.random() > 0.5:
        shear = -shear
    coeffs = (1, 0, 0, shear, 1, 0)
    return pil_img.transform((32, 32), Image.AFFINE, coeffs)


shear_y = TransformT('ShearY', _shear_y_impl)
def _translate_x_impl(pil_img, level):
    """Translate `pil_img` horizontally by up to 10 pixels, random direction.

    Args:
      pil_img: Image in PIL object.
      level: Strength of the operation specified as an Integer from
        [0, `PARAMETER_MAX`].

    Returns:
      A PIL Image that has had TranslateX applied to it.
    """
    offset = int_parameter(level, 10)
    direction = -1 if random.random() > 0.5 else 1
    coeffs = (1, 0, direction * offset, 0, 1, 0)
    return pil_img.transform((32, 32), Image.AFFINE, coeffs)


translate_x = TransformT('TranslateX', _translate_x_impl)
def _translate_y_impl(pil_img, level):
    """Translate `pil_img` vertically by up to 10 pixels, random direction.

    Args:
      pil_img: Image in PIL object.
      level: Strength of the operation specified as an Integer from
        [0, `PARAMETER_MAX`].

    Returns:
      A PIL Image that has had TranslateY applied to it.
    """
    offset = int_parameter(level, 10)
    direction = -1 if random.random() > 0.5 else 1
    coeffs = (1, 0, 0, 0, 1, direction * offset)
    return pil_img.transform((32, 32), Image.AFFINE, coeffs)


translate_y = TransformT('TranslateY', _translate_y_impl)
def _crop_impl(pil_img, level, interpolation=Image.BILINEAR):
    """Crop `level` pixels from every side, then resize back to IMAGE_SIZE."""
    box = (level, level, IMAGE_SIZE - level, IMAGE_SIZE - level)
    return pil_img.crop(box).resize((IMAGE_SIZE, IMAGE_SIZE), interpolation)


crop_bilinear = TransformT('CropBilinear', _crop_impl)
def _solarize_impl(pil_img, level):
    """Invert all pixels above a threshold that decreases with `level`.

    Args:
      pil_img: Image in PIL object.
      level: Strength of the operation specified as an Integer from
        [0, `PARAMETER_MAX`]; level 0 gives threshold 256 (no change).

    Returns:
      A PIL Image that has had Solarize applied to it.
    """
    threshold = 256 - int_parameter(level, 256)
    rgb = pil_img.convert('RGB')
    return ImageOps.solarize(rgb, threshold).convert('RGBA')


solarize = TransformT('Solarize', _solarize_impl)
def _enhancer_impl(enhancer):
    """Build a (pil_img, level) -> Image function for a PIL ImageEnhance class.

    The level maps to an enhancement factor in [0.1, 1.9]; a factor of 0
    would destroy the image entirely, hence the 0.1 floor.
    """
    def impl(pil_img, level):
        factor = .1 + float_parameter(level, 1.8)
        return enhancer(pil_img).enhance(factor)

    return impl


color = TransformT('Color', _enhancer_impl(ImageEnhance.Color))
contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast))
brightness = TransformT('Brightness', _enhancer_impl(ImageEnhance.Brightness))
sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness))
def create_cutout_mask(img_height, img_width, num_channels, size):
    """Build a cutout mask: ones everywhere except a `size` x `size` square of zeros.

    The square is centred at a uniformly sampled pixel and clipped to the
    image bounds, so the zeroed region can be smaller than `size` x `size`
    near edges. Meant to be multiplied elementwise with the image.

    Args:
        img_height: height of the target image (must equal `img_width`).
        img_width: width of the target image.
        num_channels: number of image channels.
        size: nominal side length of the zeroed square.

    Returns:
        (mask, upper_coord, lower_coord): the mask array and the inclusive
        upper-left / exclusive lower-right corners of the zeroed region.
    """
    assert img_height == img_width
    # Uniformly sample the centre of the cutout patch.
    center_h = np.random.randint(low=0, high=img_height)
    center_w = np.random.randint(low=0, high=img_width)
    half = size // 2
    upper_coord = (max(0, center_h - half), max(0, center_w - half))
    lower_coord = (min(img_height, center_h + half),
                   min(img_width, center_w + half))
    # The clipped region must still be non-empty.
    assert lower_coord[0] > upper_coord[0]
    assert lower_coord[1] > upper_coord[1]
    mask = np.ones((img_height, img_width, num_channels))
    mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = 0.
    return mask, upper_coord, lower_coord
def _cutout_pil_impl(pil_img, level):
    """Paint a level-sized grey square at a random location of `pil_img` in place."""
    size = int_parameter(level, 20)
    if size <= 0:
        return pil_img
    img_height, img_width, num_channels = (32, 32, 3)
    _, upper_coord, lower_coord = (
        create_cutout_mask(img_height, img_width, num_channels, size))
    pixels = pil_img.load()  # direct pixel access
    for x in range(upper_coord[0], lower_coord[0]):
        for y in range(upper_coord[1], lower_coord[1]):
            # Grey fill with zero alpha (pil_wrap/pil_unwrap use alpha as a mask).
            pixels[x, y] = (125, 122, 113, 0)
    return pil_img
cutout = TransformT('Cutout', _cutout_pil_impl)
# Transform pool used by RandAugment (no flips, invert, crop, cutout or blur).
ALL_TRANSFORMS = [
    identity,
    auto_contrast,
    equalize,
    rotate,
    posterize,
    solarize,
    color,
    contrast,
    brightness,
    sharpness,
    shear_x,
    shear_y,
    translate_x,
    translate_y,
]
# Full AutoAugment search space (includes geometric flips and destructive ops).
AA_ALL_TRANSFORMS = [
    flip_lr,
    flip_ud,
    auto_contrast,
    equalize,
    invert,
    rotate,
    posterize,
    crop_bilinear,
    solarize,
    color,
    contrast,
    brightness,
    sharpness,
    shear_x,
    shear_y,
    translate_x,
    translate_y,
    cutout,
    blur,
    smooth
]
# Same as AA_ALL_TRANSFORMS but with the corrected ("fixed") posterize variant.
fixed_AA_ALL_TRANSFORMS = [
    flip_lr,
    flip_ud,
    auto_contrast,
    equalize,
    invert,
    rotate,
    fixed_posterize,
    crop_bilinear,
    solarize,
    color,
    contrast,
    brightness,
    sharpness,
    shear_x,
    shear_y,
    translate_x,
    translate_y,
    cutout,
    blur,
    smooth
]
class RandAugment:
    """Applies `n` transforms drawn at random from ALL_TRANSFORMS at magnitude `m`."""

    def __init__(self, n, m):
        self.n = n
        self.m = m  # magnitude in [0, 30]

    def __call__(self, img):
        augmented = pil_wrap(img)
        # Sample with replacement and always apply (probability 1.0).
        for transform in random.choices(ALL_TRANSFORMS, k=self.n):
            augmented = transform.pil_transformer(1., self.m)(augmented)
        return pil_unwrap(augmented)
# Name -> transform lookup tables, one per transform pool above.
AA_NAME_TO_TRANSFORM = {t.name: t for t in AA_ALL_TRANSFORMS}
fixed_AA_NAME_TO_TRANSFORM = {t.name: t for t in fixed_AA_ALL_TRANSFORMS}
NAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS}
def good_policies():
    """AutoAugment policies found on Cifar.

    Returns a list of sub-policies; each sub-policy is a list of two
    (op_name, probability, magnitude) tuples applied in order.
    """
    exp0_0 = [
        [('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
        [('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)],
        [('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
        [('ShearY', 0.5, 8), ('TranslateY', 0.7, 9)],
        [('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]]
    exp0_1 = [
        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
        [('TranslateY', 0.9, 9), ('TranslateY', 0.7, 9)],
        [('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
        [('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
        [('TranslateY', 0.7, 9), ('AutoContrast', 0.9, 1)]]
    exp0_2 = [
        [('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)],
        [('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)],
        [('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)],
        [('Equalize', 0.7, 5), ('Invert', 0.1, 3)],
        [('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)]]
    exp0_3 = [
        [('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)],
        [('TranslateY', 0.8, 9), ('TranslateY', 0.9, 9)],
        [('AutoContrast', 0.8, 0), ('TranslateY', 0.7, 9)],
        [('TranslateY', 0.2, 7), ('Color', 0.9, 6)],
        [('Equalize', 0.7, 6), ('Color', 0.4, 9)]]
    exp1_0 = [
        [('ShearY', 0.2, 7), ('Posterize', 0.3, 7)],
        [('Color', 0.4, 3), ('Brightness', 0.6, 7)],
        [('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
        [('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
        [('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]]
    exp1_1 = [
        [('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)],
        [('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)],
        [('Solarize', 0.3, 5), ('Equalize', 0.6, 5)],
        [('TranslateY', 0.2, 4), ('Sharpness', 0.3, 3)],
        [('Brightness', 0.0, 8), ('Color', 0.8, 8)]]
    exp1_2 = [
        [('Solarize', 0.2, 6), ('Color', 0.8, 6)],
        [('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)],
        [('Solarize', 0.4, 1), ('Equalize', 0.6, 5)],
        [('Brightness', 0.0, 0), ('Solarize', 0.5, 2)],
        [('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]]
    exp1_3 = [
        [('Contrast', 0.7, 5), ('Brightness', 0.0, 2)],
        [('Solarize', 0.2, 8), ('Solarize', 0.1, 5)],
        [('Contrast', 0.5, 1), ('TranslateY', 0.2, 9)],
        [('AutoContrast', 0.6, 5), ('TranslateY', 0.0, 9)],
        [('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]]
    exp1_4 = [
        [('Brightness', 0.0, 7), ('Equalize', 0.4, 7)],
        [('Solarize', 0.2, 5), ('Equalize', 0.7, 5)],
        [('Equalize', 0.6, 8), ('Color', 0.6, 2)],
        [('Color', 0.3, 7), ('Color', 0.2, 4)],
        [('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]]
    exp1_5 = [
        [('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)],
        [('ShearY', 0.6, 5), ('Equalize', 0.6, 5)],
        [('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)],
        [('Equalize', 0.8, 8), ('Equalize', 0.7, 7)],
        [('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]]
    exp1_6 = [
        [('Equalize', 0.8, 4), ('TranslateY', 0.8, 9)],
        [('TranslateY', 0.8, 9), ('TranslateY', 0.6, 9)],
        [('TranslateY', 0.9, 0), ('TranslateY', 0.5, 9)],
        [('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)],
        [('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]]
    exp2_0 = [
        [('Color', 0.7, 7), ('TranslateX', 0.5, 8)],
        [('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
        [('TranslateY', 0.4, 3), ('Sharpness', 0.2, 6)],
        [('Brightness', 0.9, 6), ('Color', 0.2, 8)],
        [('Solarize', 0.5, 2), ('Invert', 0.0, 3)]]
    exp2_1 = [
        [('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)],
        [('Cutout', 0.2, 4), ('Equalize', 0.1, 1)],
        [('Equalize', 0.7, 7), ('AutoContrast', 0.6, 4)],
        [('Color', 0.1, 8), ('ShearY', 0.2, 3)],
        [('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]]
    exp2_2 = [
        [('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)],
        [('TranslateY', 0.3, 6), ('Cutout', 0.3, 3)],
        [('Equalize', 0.5, 0), ('Solarize', 0.6, 6)],
        [('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)],
        [('Equalize', 0.8, 2), ('Invert', 0.4, 0)]]
    exp2_3 = [
        [('Equalize', 0.9, 5), ('Color', 0.7, 0)],
        [('Equalize', 0.1, 1), ('ShearY', 0.1, 3)],
        [('AutoContrast', 0.7, 3), ('Equalize', 0.7, 0)],
        [('Brightness', 0.5, 1), ('Contrast', 0.1, 7)],
        [('Contrast', 0.1, 4), ('Solarize', 0.6, 5)]]
    exp2_4 = [
        [('Solarize', 0.2, 3), ('ShearX', 0.0, 0)],
        [('TranslateX', 0.3, 0), ('TranslateX', 0.6, 0)],
        [('Equalize', 0.5, 9), ('TranslateY', 0.6, 7)],
        [('ShearX', 0.1, 0), ('Sharpness', 0.5, 1)],
        [('Equalize', 0.8, 6), ('Invert', 0.3, 6)]]
    exp2_5 = [
        [('AutoContrast', 0.3, 9), ('Cutout', 0.5, 3)],
        [('ShearX', 0.4, 4), ('AutoContrast', 0.9, 2)],
        [('ShearX', 0.0, 3), ('Posterize', 0.0, 3)],
        [('Solarize', 0.4, 3), ('Color', 0.2, 4)],
        [('Equalize', 0.1, 4), ('Equalize', 0.7, 6)]]
    exp2_6 = [
        [('Equalize', 0.3, 8), ('AutoContrast', 0.4, 3)],
        [('Solarize', 0.6, 4), ('AutoContrast', 0.7, 6)],
        [('AutoContrast', 0.2, 9), ('Brightness', 0.4, 8)],
        [('Equalize', 0.1, 0), ('Equalize', 0.0, 6)],
        [('Equalize', 0.8, 4), ('Equalize', 0.0, 4)]]
    exp2_7 = [
        [('Equalize', 0.5, 5), ('AutoContrast', 0.1, 2)],
        [('Solarize', 0.5, 5), ('AutoContrast', 0.9, 5)],
        [('AutoContrast', 0.6, 1), ('AutoContrast', 0.7, 8)],
        [('Equalize', 0.2, 0), ('AutoContrast', 0.1, 2)],
        [('Equalize', 0.6, 9), ('Equalize', 0.4, 4)]]
    exp0s = exp0_0 + exp0_1 + exp0_2 + exp0_3
    exp1s = exp1_0 + exp1_1 + exp1_2 + exp1_3 + exp1_4 + exp1_5 + exp1_6
    exp2s = exp2_0 + exp2_1 + exp2_2 + exp2_3 + exp2_4 + exp2_5 + exp2_6 + exp2_7
    return exp0s + exp1s + exp2s
# CIFAR AutoAugment sub-policies, materialized once at import time.
cifar_gp = good_policies()
# SVHN AutoAugment policy: the i-th sub-policy applies first_aug_ops[i] then second_aug_ops[i].
first_aug_ops = [("ShearX",0.9,4), ("ShearY",0.9,8), ("Equalize",0.6,5), ("Invert",0.9,3), ("Equalize",0.6,1), ("ShearX",0.9,4), ("ShearY",0.9,8), ("ShearY",0.9,5), ("Invert",0.9,6), ("Equalize",0.6,3), ("ShearX",0.9,4), ("ShearY",0.8,8), ("Equalize",0.9,5), ("Invert",0.9,4), ("Contrast",0.3,3), ("Invert",0.8,5), ("ShearY",0.7,6), ("Invert",0.6,4), ("ShearY",0.3,7), ("ShearX",0.1,6), ("Solarize",0.7,2), ("ShearY",0.8,4), ("ShearX",0.7,9), ("ShearY",0.8,5), ("ShearX",0.7,2)]
second_aug_ops = [("Invert",0.2,3), ("Invert",0.7,5), ("Solarize",0.6,6), ("Equalize",0.6,3), ("Rotate",0.9,3), ("AutoContrast",0.8,3), ("Invert",0.4,5), ("Solarize",0.2,6), ("AutoContrast",0.8,1), ("Rotate",0.9,3), ("Solarize",0.3,3), ("Invert",0.7,4), ("TranslateY",0.6,6), ("Equalize",0.6,7), ("Rotate",0.8,4), ("TranslateY",0.0,2), ("Solarize",0.4,8), ("Rotate",0.8,4), ("TranslateX",0.9,3), ("Invert",0.6,5), ("TranslateY",0.6,7), ("Invert",0.8,8), ("TranslateY",0.8,3), ("AutoContrast",0.7,3), ("Invert",0.1,5)]
svhn_gp = [[a1, a2] for a1, a2 in zip(first_aug_ops,second_aug_ops)]
class CifarAutoAugment:
    """Callable transform applying a randomly drawn CIFAR AutoAugment sub-policy."""

    def __init__(self, fixed_posterize):
        # Whether apply_policy should use the corrected posterize op.
        self.fixed_posterize = fixed_posterize

    def __call__(self, img):
        policy = cifar_gp[np.random.choice(len(cifar_gp))]
        return apply_policy(policy, img, use_fixed_posterize=self.fixed_posterize)
class SVHNAutoAugment:
    """Callable transform applying a randomly drawn SVHN AutoAugment sub-policy."""

    def __init__(self, fixed_posterize):
        # Whether apply_policy should use the corrected posterize op.
        self.fixed_posterize = fixed_posterize

    def __call__(self, img):
        policy = svhn_gp[np.random.choice(len(svhn_gp))]
        return apply_policy(policy, img, use_fixed_posterize=self.fixed_posterize)
| 21,409
| 32.34891
| 517
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/common.py
|
import logging
import warnings
import random
from copy import copy
from typing import Union
from collections import Counter
import numpy as np
import torch
from torch.utils.checkpoint import check_backward_validity, detach_variable, get_device_states, set_device_states
from torchvision.datasets import VisionDataset, CIFAR10, CIFAR100, ImageFolder
from torch.utils.data import Subset, ConcatDataset
from PIL import Image
# Shared log-line format for every handler created in this module.
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
# PIL emits this warning for images with slightly malformed EXIF data; silence it.
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
def get_logger(name, level=logging.DEBUG):
    """Return logger `name` with exactly one fresh stream handler at `level`."""
    logger = logging.getLogger(name)
    # Drop handlers added by earlier calls so messages are not duplicated.
    logger.handlers.clear()
    logger.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    return logger
def add_filehandler(logger, filepath):
    """Additionally log everything from `logger` (DEBUG and up) to `filepath`."""
    file_handler = logging.FileHandler(filepath)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
def copy_and_replace_transform(ds: Union[CIFAR10, ImageFolder, Subset], transform):
    """Return a shallow copy of `ds` whose (old-style) `transform` is replaced.

    Works for plain datasets, `Subset` (replaces on the wrapped dataset) and
    `ConcatDataset` (replaces on every member). The original dataset objects
    are left untouched.
    """
    # All wrapped datasets must still use the old-style `transform` attribute.
    assert ds.dataset.transform is not None if isinstance(ds,Subset) else (all(d.transform is not None for d in ds.datasets) if isinstance(ds,ConcatDataset) else ds.transform is not None) # make sure still uses old style transform
    if isinstance(ds, Subset):
        inner = copy(ds.dataset)
        inner.transform = transform
        outer = copy(ds)
        outer.dataset = inner
        return outer
    if isinstance(ds, ConcatDataset):
        def _with_new_transform(member):
            replaced = copy(member)
            replaced.transform = transform
            return replaced
        return ConcatDataset([_with_new_transform(d) for d in ds.datasets])
    replaced = copy(ds)
    replaced.transform = transform
    return replaced
def apply_weightnorm(nn):
    """Wrap every Linear/Conv submodule of module `nn` with weight normalization."""
    def _maybe_weightnorm(module):
        cls_name = type(module).__name__
        if 'Linear' in cls_name or 'Conv' in cls_name:
            torch.nn.utils.weight_norm(module, name='weight', dim=0)
    nn.apply(_maybe_weightnorm)
def shufflelist_with_seed(lis, seed='2020'):
    """Shuffle `lis` in place deterministically, preserving the global RNG state."""
    saved_state = random.getstate()
    random.seed(seed)
    random.shuffle(lis)
    random.setstate(saved_state)
def stratified_split(labels, val_share):
    """Deterministically split the indices of `labels` into (train, val) lists.

    Each label contributes ~(1 - val_share) of its indices to the train split,
    so class proportions are preserved. Shuffling uses fixed per-label seeds,
    making the split reproducible across runs.
    """
    assert isinstance(labels, list)
    counter = Counter(labels)
    indices_per_label = {label: [i for i, l in enumerate(labels) if l == label] for label in counter}
    per_label_split = {}
    for label, count in counter.items():
        indices = indices_per_label[label]
        assert count == len(indices)
        shufflelist_with_seed(indices, f'2020_{label}_{count}')
        border = round(count * (1. - val_share))
        per_label_split[label] = (indices[:border], indices[border:])
    train_idx, val_idx = [], []
    for label, (train_part, val_part) in per_label_split.items():
        train_idx.extend(train_part)
        val_idx.extend(val_part)
    shufflelist_with_seed(train_idx, '2020_yoyo')
    shufflelist_with_seed(val_idx, '2020_yo')
    return (train_idx, val_idx)
def denormalize(img, mean, std):
    """Invert channelwise normalization of CHW tensor `img` in place; returns `img`."""
    mean = torch.tensor(mean).to(img.device)
    std = torch.tensor(std).to(img.device)
    return img.mul_(std[:, None, None]).add_(mean[:, None, None])
def normalize(img, mean, std):
    """Apply channelwise normalization to CHW tensor `img` in place; returns `img`."""
    mean = torch.tensor(mean).to(img.device)
    std = torch.tensor(std).to(img.device)
    return img.sub_(mean[:, None, None]).div_(std[:, None, None])
| 3,469
| 34.408163
| 230
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/metrics.py
|
import copy
import torch
from collections import defaultdict
from torch import nn
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) scores/logits.
        target: (batch,) integer class labels.
        topk: iterable of k values to report.

    Returns:
        A list of scalar tensors, one precision value per k in `topk`.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, top_pred = output.topk(maxk, 1, True, True)
    top_pred = top_pred.t()  # (maxk, batch)
    hits = top_pred.eq(target.view(1, -1).expand_as(top_pred))
    return [hits[:k].flatten().float().sum(0).mul_(1. / batch_size) for k in topk]
def cross_entropy_smooth(input, target, size_average=True, label_smoothing=0.1):
    """Cross entropy against label-smoothed one-hot targets.

    The one-hot target is scaled by (1 - label_smoothing) and every class gets
    an extra 0.5 * label_smoothing mass (kept from the original formulation;
    note the masses do not sum exactly to 1 for num_classes != ...).

    Fixes vs. the original: the number of classes is read from `input` instead
    of being hard-coded to 10, the one-hot matrix follows `input`'s device
    instead of forcing CUDA, and log-softmax is taken with an explicit dim
    (bare ``nn.LogSoftmax()`` is deprecated).

    Args:
        input: (batch, num_classes) unnormalized logits.
        target: (batch,) integer class labels.
        size_average: return the batch mean if True, else the batch sum.
        label_smoothing: smoothing strength.
    """
    num_classes = input.size(1)
    one_hot = torch.eye(num_classes, device=input.device)[target]
    smoothed = one_hot * (1 - label_smoothing) + 0.5 * label_smoothing
    log_probs = torch.nn.functional.log_softmax(input, dim=1)
    per_sample = torch.sum(-smoothed * log_probs, dim=1)
    return per_sample.mean() if size_average else per_sample.sum()
class Accumulator:
    """Accumulates named float metrics and supports elementwise division."""

    def __init__(self):
        # Unknown keys default to 0., so add() works without initialization.
        self.metrics = defaultdict(lambda: 0.)

    def add(self, key, value):
        """Add `value` onto the running total for `key`."""
        self.metrics[key] += value

    def add_dict(self, dict):
        """Add every key/value pair of a mapping at once."""
        for key, value in dict.items():
            self.add(key, value)

    def __getitem__(self, item):
        return self.metrics[item]

    def __setitem__(self, key, value):
        self.metrics[key] = value

    def __contains__(self, item):
        return self.metrics.__contains__(item)

    def get_dict(self):
        """Return a deep-copied plain dict of the current totals."""
        return copy.deepcopy(dict(self.metrics))

    def items(self):
        return self.metrics.items()

    def __str__(self):
        return str(dict(self.metrics))

    def __truediv__(self, other):
        """Return a new Accumulator with every value divided by `other`."""
        divided = Accumulator()
        for key, value in self.items():
            divided[key] = value / other
        return divided

    def divide(self, divisor, **special_divisors):
        """Like `/`, but keys named in `special_divisors` use their own divisor."""
        divided = Accumulator()
        for key, value in self.items():
            divided[key] = value / special_divisors.get(key, divisor)
        return divided
class SummaryWriterDummy:
    """No-op drop-in for tensorboardX.SummaryWriter, used on non-logging ranks."""

    def __init__(self, log_dir):
        # The directory argument is accepted for API compatibility and ignored.
        pass

    def add_scalar(self, *args, **kwargs):
        """Ignore scalar logging calls."""
        pass

    def add_image(self, *args, **kwargs):
        """Ignore image logging calls."""
        pass
| 2,281
| 24.076923
| 80
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/__init__.py
| 0
| 0
| 0
|
py
|
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/train.py
|
import itertools
import json, csv
import logging
import math
import os
from collections import OrderedDict
import gc
import tempfile
import pickle
from dataclasses import dataclass
import random
from time import time
import numpy as np
import torch
from torch import nn, optim
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision import transforms
from tqdm import tqdm
import yaml
from theconf import Config as C, ConfigArgumentParser
from argparse import ArgumentParser
from DeepAA_evaluate.common import get_logger
from DeepAA_evaluate.data import get_dataloaders, mixup_data
from DeepAA_evaluate.lr_scheduler import adjust_learning_rate_resnet
from DeepAA_evaluate.metrics import accuracy, Accumulator
from DeepAA_evaluate.networks import get_model, num_class
from warmup_scheduler import GradualWarmupScheduler
import aug_lib
logger = get_logger('DeepAA_evaluate')
logger.setLevel(logging.DEBUG)
def run_epoch(rank, worldsize, model, loader, loss_fn, optimizer, desc_default='', epoch=0, writer=None, verbose=1, scheduler=None,sample_pairing_loader=None):
    """Run one training (optimizer given) or evaluation (optimizer=None) epoch.

    Returns an Accumulator of per-sample-averaged metrics (loss, top1, top5,
    their errors, eval_top1 and optionally lr).
    NOTE(review): indentation was lost in this source dump and has been
    reconstructed from control-flow keywords — verify against upstream.
    """
    tqdm_disable = bool(os.environ.get('TASK_NAME', ''))  # KakaoBrain Environment
    if verbose:
        logging_loader = tqdm(loader, disable=tqdm_disable)
        logging_loader.set_description('[%s %04d/%04d]' % (desc_default, epoch, C.get()['epoch']))
    else:
        logging_loader = loader
    metrics = Accumulator()
    cnt = 0
    eval_cnt = 0
    total_steps = len(loader)
    steps = 0
    gc.collect()
    torch.cuda.empty_cache()
    #print('mem usage', resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
    # With DDP, gradients are only all-reduced every `communicate_grad_every` steps.
    communicate_grad_every = C.get().get('communicate_grad_every', 1)
    before_load_time = time()
    # SamplePairing support: expose a batch of blend images to the aug library.
    if C.get().get('load_sample_pairing_batch',False) and sample_pairing_loader is not None:
        sample_pairing_iter = iter(sample_pairing_loader)
        aug_lib.blend_images = [transforms.ToPILImage()(sample_pairing_loader.denorm(ti)) for ti in
                                next(sample_pairing_iter)[0]]
    for batch_idx, batch in enumerate(logging_loader): # logging loader might be a loader or a loader wrapped into tqdm
        data, label = batch[:2]
        steps += 1
        # Refresh the blend-image pool once per step while batches remain.
        if C.get().get('load_sample_pairing_batch',False) and sample_pairing_loader is not None:
            try:
                aug_lib.blend_images = [transforms.ToPILImage()(sample_pairing_loader.denorm(ti)) for ti in next(sample_pairing_iter)[0]]
            except StopIteration:
                print("Blend images iterator ended. If this is printed twice per loop, there is something out-of-order.")
                pass
        if worldsize > 1:
            data, label = data.to(rank), label.to(rank)
        else:
            data, label = data.cuda(), label.cuda()
        # Mixup: convex-combine two labels; applied to training phases only.
        if C.get().get('mixup', 0) > 0 and 'train' in desc_default:
            data, label_a, label_b, lam = mixup_data(data, label, C.get().get('mixup', 0))
            preds = model(data)
            loss = lam * loss_fn(preds, label_a) + (1.0 - lam) * loss_fn(preds, label_b)
        else:
            preds = model(data)
            loss = loss_fn(preds, label)
        # Label smoothing folded into the loss rather than into the targets.
        if C.get().get('label_smooth', 0) > 0 and 'train' in desc_default:
            smooth = C.get().get('label_smooth', 0)
            loss = (1.0-smooth) * loss - smooth * torch.nn.functional.log_softmax(preds, dim=-1).mean()
        communicate_grad = steps % communicate_grad_every == 0
        just_communicated_grad = steps % communicate_grad_every == 1 # also is true in first step of each epoch
        if optimizer and (communicate_grad_every == 1 or just_communicated_grad):
            optimizer.zero_grad()
        if optimizer:
            if communicate_grad:
                loss.backward()
            else:
                # Skip DDP gradient all-reduce on accumulation-only steps.
                with model.no_sync():
                    loss.backward()
            if C.get()['optimizer'].get('clip', 5) > 0:
                nn.utils.clip_grad_norm_(model.parameters(), C.get()['optimizer'].get('clip', 5))
            if (steps-1) % C.get().get('step_optimizer_every', 1) == C.get().get('step_optimizer_nth_step', 0): # default is to step on the first step of each pack
                optimizer.step()
        #print(f"Time for forward/backward {time()-fb_time}")
        top1, top5 = accuracy(preds, label, (1, 5))
        # Metrics are accumulated as per-sample sums and divided at the end.
        metrics.add_dict({
            'loss': loss.item() * len(data),
            'top1': top1.item() * len(data),
            'top5': top5.item() * len(data),
            'top1_error': (1.0 - top1.item()) * len(data),
            'top5_error': (1.0 - top5.item()) * len(data),
        })
        if steps % 2 == 0:
            metrics.add('eval_top1', top1.item() * len(data)) # times 2 since it is only recorded every sec step
            eval_cnt += len(data)
        cnt += len(data)
        if verbose:
            postfix = metrics.divide(cnt, eval_top1=eval_cnt)
            if optimizer:
                postfix['lr'] = optimizer.param_groups[0]['lr']
            logging_loader.set_postfix(postfix)
        # Fractional-epoch scheduler step (cosine schedules step per batch).
        if scheduler is not None:
            scheduler.step(epoch - 1 + float(steps) / total_steps)
        # visualize augmented images
        #before_load_time = time()
        del preds, loss, top1, top5, data, label
    if tqdm_disable:
        if optimizer:
            logger.info('[%s %03d/%03d] %s lr=%.6f', desc_default, epoch, C.get()['epoch'], metrics.divide(cnt, eval_top1=eval_cnt), optimizer.param_groups[0]['lr'])
        else:
            logger.info('[%s %03d/%03d] %s', desc_default, epoch, C.get()['epoch'], metrics.divide(cnt, eval_top1=eval_cnt))
    metrics = metrics.divide(cnt, eval_top1=eval_cnt)
    if optimizer:
        metrics.metrics['lr'] = optimizer.param_groups[0]['lr']
    if verbose:
        for key, value in metrics.items():
            writer.add_scalar(key, value, epoch)
    return metrics
def train_and_eval(rank, worldsize, tag, dataroot, test_ratio=0.0, cv_fold=0, reporter=None, metric='last', save_path=None, only_eval=False):
    """Full train/eval driver for one (possibly distributed) worker.

    Builds dataloaders, model, optimizer and LR schedule from the global
    theconf config, optionally resumes from `save_path`, then either only
    evaluates or trains for C.get()['epoch'] epochs, checkpointing along the
    way. Returns an OrderedDict of final metrics.
    NOTE(review): indentation was lost in this source dump and has been
    reconstructed from control flow — verify against upstream.
    """
    if not reporter:
        reporter = lambda **kwargs: 0
    # Only rank 0 with a tag gets a real tensorboard writer; others get a dummy.
    if not tag or (worldsize and torch.distributed.get_rank() > 0):
        from DeepAA_evaluate.metrics import SummaryWriterDummy as SummaryWriter
        logger.warning('tag not provided or rank > 0 -> no tensorboard log.')
    else:
        from tensorboardX import SummaryWriter
    # NOTE(review): makedirs assumed at function level (harmless for dummy writers).
    os.makedirs('./logs/', exist_ok=True)
    writers = [SummaryWriter(log_dir='./logs/%s/%s' % (tag, x)) for x in ['train', 'valid', 'test', 'testtrain']]
    aug_lib.set_augmentation_space(C.get().get('augmentation_search_space', 'standard'), C.get().get('augmentation_parameter_max', 30), C.get().get('custom_search_space_augs', None))
    max_epoch = C.get()['epoch']
    trainsampler, trainloader, validloader, testloader_, testtrainloader_, dataset_info = get_dataloaders(C.get()['dataset'], C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, distributed=worldsize>1, started_with_spawn=C.get()['started_with_spawn'], summary_writer=writers[0])
    # create a model & an optimizer
    model_conf = C.get()['model']
    model = get_model(model_conf, C.get()['batch'], num_class(C.get()['dataset']), writer=writers[0]) #
    if worldsize > 1:
        model = DDP(model.to(rank), device_ids=[rank])
    else:
        model = model.to('cuda:0')
    criterion = nn.CrossEntropyLoss()
    # Separate batch-norm parameters so they can be excluded from weight decay.
    bn_parameters = sum([list(m.parameters()) for m in model.modules() if isinstance(m, torch.nn.modules.batchnorm._BatchNorm)], [])
    other_parameters = [param for param in model.parameters() if id(param) not in [id(p) for p in bn_parameters]]
    assert len(list(model.parameters())) == len(bn_parameters) + len(other_parameters), 'Some parameters are missing'
    if C.get()['optimizer']['type'] == 'sgd':
        optimizer = optim.SGD(
            [{'params': bn_parameters, 'weight_decay': 0},
             {'params': other_parameters}],
            lr=C.get()['lr'],
            momentum=C.get()['optimizer'].get('momentum', 0.9),
            weight_decay=C.get()['optimizer']['decay'],
            nesterov=C.get()['optimizer']['nesterov']
        )
    elif C.get()['optimizer']['type'] == 'adam':
        optimizer = optim.Adam(
            model.parameters(),
            lr=C.get()['lr'],
            betas=(C.get()['optimizer'].get('momentum',.9),.999)
        )
    else:
        raise ValueError('invalid optimizer type=%s' % C.get()['optimizer']['type'])
    lr_scheduler_type = C.get()['lr_schedule'].get('type', 'cosine')
    if lr_scheduler_type == 'cosine':
        warmup_epochs = 0
        if C.get()['lr_schedule'].get('warmup', None):
            warmup_epochs = C.get()['lr_schedule']['warmup']['epoch']
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                               T_max=C.get()['epoch'] - warmup_epochs,
                                                               eta_min=0.)
    elif lr_scheduler_type == 'resnet':
        scheduler = adjust_learning_rate_resnet(optimizer)
    elif lr_scheduler_type == 'constant':
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda e: 1.)
    else:
        raise ValueError('invalid lr_schduler=%s' % lr_scheduler_type)
    if C.get()['lr_schedule'].get('warmup', None):
        # Wrap the base schedule with linear warmup for the first warmup epochs.
        scheduler = GradualWarmupScheduler(
            optimizer,
            multiplier=C.get()['lr_schedule']['warmup']['multiplier'],
            total_epoch=C.get()['lr_schedule']['warmup']['epoch'],
            after_scheduler=scheduler
        )
    result = OrderedDict()
    epoch_start = 1
    # Optionally resume model/optimizer state from an existing checkpoint.
    if save_path and os.path.exists(save_path):
        logger.info('%s file found. loading...' % save_path)
        data = torch.load(save_path, map_location='cpu')
        if 'model' in data or 'state_dict' in data:
            key = 'model' if 'model' in data else 'state_dict'
            logger.info('checkpoint epoch@%d' % data['epoch'])
            if C.get().get('load_main_model', False):
                # model.load_state_dict(data[key])
                # Strip/add the DDP 'module.' prefix depending on the wrapper.
                if not isinstance(model, DDP):
                    model.load_state_dict({k.replace('module.', ''): v for k, v in data[key].items()})
                else:
                    model.load_state_dict({k if 'module.' in k else 'module.'+k: v for k, v in data[key].items()})
            optimizer.load_state_dict(data['optimizer'])
            if data['epoch'] < C.get()['epoch']:
                epoch_start = data['epoch'] + 1
            else:
                only_eval = True
        else:
            #model.load_state_dict({k: v for k, v in data.items()})
            raise ValueError(f"Wrong format of data in save path: {save_path}.")
        del data
    else:
        logger.info('"%s" file not found. skip to pretrain weights...' % save_path)
        if only_eval:
            logger.warning('model checkpoint not found. only-evaluation mode is off.')
            only_eval = False
    if only_eval:
        logger.info('evaluation only+')
        model.eval()
        rs = dict()
        with torch.no_grad():
            rs['train'] = run_epoch(rank, worldsize, model, trainloader, criterion, None, desc_default='train', epoch=0, writer=writers[0])
            #rs['valid'] = run_epoch(rank, worldsize, model, validloader, criterion, None, desc_default='valid', epoch=0, writer=writers[1])
            rs['test'] = run_epoch(rank, worldsize, model, testloader_, criterion, None, desc_default='*test', epoch=0, writer=writers[2])
        for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'test']):
            if setname not in rs:
                continue
            result['%s_%s' % (key, setname)] = rs[setname][key]
        result['epoch'] = 0
        return result
    # train loop
    best_top1 = 0
    for epoch in range(epoch_start, max_epoch + 1):
        if worldsize > 1:
            trainsampler.set_epoch(epoch)
        model.train()
        rs = dict()
        rs['train'] = run_epoch(rank, worldsize,model, trainloader, criterion, optimizer, desc_default='train', epoch=epoch, writer=writers[0], verbose=True, scheduler=scheduler, sample_pairing_loader=testtrainloader_)
        model.eval()
        if math.isnan(rs['train']['loss']):
            raise Exception('train loss is NaN.')
        # Evaluate periodically and always on the last few epochs.
        if epoch % C.get().get('test_interval', 20) == 0 or epoch > max_epoch-5:
            with torch.no_grad():
                if C.get().get('compute_testtrain', False):
                    rs['testtrain'] = run_epoch(rank, worldsize, model, testtrainloader_, criterion, None, desc_default='testtrain', epoch=epoch, writer=writers[3], verbose=True)
                rs['test'] = run_epoch(rank, worldsize, model, testloader_, criterion, None, desc_default='*test', epoch=epoch, writer=writers[2], verbose=True)
            # 'last' keeps the most recent metrics; otherwise track the best run.
            if metric == 'last' or rs[metric]['top1'] > best_top1:
                if metric != 'last':
                    best_top1 = rs[metric]['top1']
                for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'test', 'testtrain']):
                    if setname in rs and key in rs[setname]:
                        result['%s_%s' % (key, setname)] = rs[setname][key]
                result['epoch'] = epoch
                #writers[1].add_scalar('valid_top1/best', rs['valid']['top1'], epoch)
                writers[2].add_scalar('test_top1/best', rs['test']['top1'], epoch)
                reporter(
                    loss_valid=rs['test']['loss'], top1_valid=rs['test']['top1'],
                    loss_test=rs['test']['loss'], top1_test=rs['test']['top1']
                )
            # save checkpoint
            if save_path and C.get().get('save_model', True) and (worldsize <= 1 or torch.distributed.get_rank() == 0):
                logger.info('save model@%d to %s' % (epoch, save_path))
                torch.save({
                    'epoch': epoch,
                    'log': {
                        'train': rs['train'].get_dict(),
                        'test': rs['test'].get_dict(),
                    },
                    'optimizer': optimizer.state_dict(),
                    'model': model.state_dict()
                }, save_path)
                # Additionally keep a per-epoch snapshot with metrics in the name.
                torch.save({
                    'epoch': epoch,
                    'log': {
                        'train': rs['train'].get_dict(),
                        'test': rs['test'].get_dict(),
                    },
                    'optimizer': optimizer.state_dict(),
                    'model': model.state_dict()
                }, save_path.replace('.pth', '_e%d_top1_%.3f_%.3f' % (epoch, rs['train']['top1'], rs['test']['top1']) + '.pth'))
        early_finish_epoch = C.get().get('early_finish_epoch', None)
        if early_finish_epoch == epoch:
            break
    del model
    return result
def setup(global_rank, local_rank, world_size, port_suffix):
    """Initialize the NCCL process group for this worker; return (rank, world_size).

    With a `port_suffix`, ranks rendezvous via MASTER_ADDR/MASTER_PORT (single
    node) or an explicit tcp:// master URL (multi node). Without one, the
    env:// launcher variables are used.
    """
    torch.cuda.set_device(local_rank)
    if port_suffix is not None:
        if C.get().get('master_addr', None) is None:
            os.environ['MASTER_ADDR'] = 'localhost'
            os.environ['MASTER_PORT'] = f'12{port_suffix}'
            # initialize the process group
            dist.init_process_group("nccl", rank=global_rank, world_size=world_size)
        else:
            assert C.get().get('master_port', None) is not None
            # os.environ['MASTER_ADDR'] = C.get()['master_addr']
            # os.environ['MASTER_PORT'] = '12{}'.format(C.get()['master_port'])
            master_url = 'tcp://{}:{}'.format(C.get()['master_addr'], C.get()['master_port'])
            # initialize the process group
            dist.init_process_group("nccl", rank=global_rank, world_size=world_size, init_method=master_url)
        return global_rank, world_size
    else:
        dist.init_process_group(backend='NCCL', init_method='env://')
        return torch.distributed.get_rank(), torch.distributed.get_world_size()
def cleanup():
    """Tear down the torch.distributed process group for this worker."""
    dist.destroy_process_group()
def parse_args():
    """Build the theconf argument parser for evaluation runs and parse argv."""
    parser = ConfigArgumentParser(conflict_handler='resolve')
    for flag, options in [
        ('--tag', dict(type=str, default='')),
        ('--dataroot', dict(type=str, default='/data/private/pretrainedmodels',
                            help='torchvision data folder')),
        ('--save', dict(type=str, default='')),
        ('--cv-ratio', dict(type=float, default=0.0)),
        ('--cv', dict(type=int, default=0)),
        ('--only-eval', dict(action='store_true')),
        ('--local_rank', dict(default=None, type=int)),
    ]:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def spawn_process(global_rank, worldsize, port_suffix, args, config_path=None, communicate_results_with_queue=None, local_rank=None, node_id=None):
    """Per-process entry point: set up (optional) DDP, run training, report result.

    Called either via mp.spawn (global_rank passed positionally) or directly
    from a launcher with `local_rank` set. `worldsize == 0` means no
    distributed setup at all.
    NOTE(review): indentation was lost in this source dump and has been
    reconstructed; the placement of the diagnostic print is assumed to be in
    the multi-node branch (it reads master_addr/master_port) — verify upstream.
    """
    if config_path is not None:
        C(config_path)
    if local_rank is None:
        if C.get().get('num_nodes', 1) == 1:
            local_rank = global_rank
        else:
            # Multi-node: the spawned index is the local rank; offset by node id.
            local_rank = global_rank
            global_rank = local_rank + n_gpus_per_node * node_id
            print('local_rank={}, global_rank={}, world_size={}, Master={}, 12{}'.format(local_rank, global_rank, worldsize, C.get()['master_addr'], C.get()['master_port']))
    started_with_spawn = worldsize is not None and worldsize > 0
    if worldsize != 0:
        global_rank, worldsize = setup(global_rank, local_rank, worldsize, port_suffix)
        print('dist info', local_rank,global_rank,worldsize)
    #communicate_results_with_queue.value = 1.
    #return
    C.get()['started_with_spawn'] = started_with_spawn
    if worldsize:
        assert worldsize == C.get()['gpus'], f"Did not specify the number of GPUs in Config with which it was started: {worldsize} vs {C.get()['gpus']}"
    else:
        assert 'gpus' not in C.get() or C.get()['gpus'] == 1
    assert (args.only_eval and args.save) or not args.only_eval, 'checkpoint path not provided in evaluation mode.'
    if not args.only_eval:
        if args.save:
            logger.info('checkpoint will be saved at %s' % args.save)
        else:
            logger.warning('Provide --save argument to save the checkpoint. Without it, training result will not be saved!')
    #if args.save:
    #add_filehandler(logger, args.save.replace('.pth', '.log'))
    #logger.info(json.dumps(C.get().conf, indent=4))
    torch.backends.cudnn.benchmark = True
    # Seed every RNG for reproducibility when a seed is configured.
    if 'seed' in C.get():
        seed = C.get()['seed']
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
        #torch.backends.cudnn.benchmark = False
    # Local import shadows the module-level `from time import time` on purpose.
    import time
    t = time.time()
    result = train_and_eval(local_rank, worldsize, args.tag, args.dataroot, test_ratio=args.cv_ratio, cv_fold=args.cv, save_path=args.save, only_eval=args.only_eval, metric='last')
    elapsed = time.time() - t
    print('done')
    logger.info(f'done on rank {global_rank}.')
    logger.info('model: %s' % C.get()['model'])
    logger.info('augmentation: %s' % C.get()['aug'])
    logger.info('\n' + json.dumps(result, indent=4))
    logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))
    logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))
    logger.info(args.save)
    if worldsize:
        cleanup()
    # Only rank 0 publishes the final accuracy back to the parent process.
    if global_rank == 0 and communicate_results_with_queue is not None:
        #communicate_results_with_queue.put([result])
        communicate_results_with_queue.value = result['top1_test']
@dataclass
class Args:
    # Mirrors the CLI arguments of parse_args() for programmatic invocation.
    tag: str = ''
    dataroot: str = None
    save: str = ''
    cv_ratio: float = 0.
    cv: int = 0
    only_eval: bool = False
    local_rank: None = None
def run_from_py(dataroot, config_dict, save=''):
    """Programmatic entry: dump `config_dict` to a temp YAML and train on all GPUs.

    Returns the final top-1 test accuracy, communicated back from rank 0 via a
    shared multiprocessing Value.
    """
    args = Args(dataroot=dataroot, save=save)
    with tempfile.NamedTemporaryFile(mode='w+') as f, tempfile.NamedTemporaryFile() as result_file:
        path = f.name
        yaml.dump(config_dict, f)
        world_size = torch.cuda.device_count()
        port_suffix = str(random.randint(100, 999))
        #result_queue = mp.get_context('spawn').Queue()
        # Shared double; rank 0 writes the final top-1 into it.
        result_queue = mp.get_context('spawn').Value('d',.0)
        if world_size > 1:
            outcome = mp.spawn(spawn_process,
                               args=(world_size, port_suffix, args, path, result_queue),
                               nprocs=world_size,
                               join=True)
        else:
            outcome = spawn_process(0, 0, port_suffix, args, path, result_queue)
        #result = result_queue.get()[0]
        result = result_queue.value
    return result
n_gpus_per_node = torch.cuda.device_count()
if __name__ == '__main__':
    # Pre-parse only --local_rank so we can distinguish launcher mode
    # (torch.distributed sets it) from self-spawning mode (it stays None).
    pre_parser = ArgumentParser()
    pre_parser.add_argument('--local_rank', default=None, type=int)
    args, _ = pre_parser.parse_known_args()
    parsed_args = parse_args()
    # generate CSV file:
    if C.get().get('save_to_csv', False):
        if not os.path.isfile('eval_performance.csv'):
            with open('eval_performance.csv', mode='w') as csv_file:
                fieldnames = ['decay', 'warmup_multiplier', 'epoch', 'top1_test', 'top1_train', 'top5_test', 'top5_train']
                writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
                writer.writeheader()
    if args.local_rank is None:
        print("Spawning processes")
        world_size = n_gpus_per_node * C.get().get('num_nodes', 1)
        assert world_size == C.get()['gpus'], f"Did not specify the number of GPUs in Config with which it was started: {world_size} vs {C.get()['gpus']}"
        port_suffix = str(random.randint(10,99))
        if world_size > 1:
            if C.get().get('num_nodes', 1) == 1:
                # Single node, multiple GPUs: spawn one worker per GPU.
                outcome = mp.spawn(spawn_process,
                         args=(world_size,port_suffix,parsed_args, parsed_args.config),
                         nprocs=world_size,
                         join=True)
            else:
                # Multi-node: the master port comes from the config instead.
                port_suffix = C.get()['master_port']
                outcome = mp.spawn(spawn_process,
                           args=(world_size, port_suffix, parsed_args, parsed_args.config, None, None, C.get()['node_id']),
                           nprocs=n_gpus_per_node,
                           join=True)
        else:
            spawn_process(0, 0, None, parsed_args)
            # NOTE(review): pickle.load on a text-mode ('r') handle will raise;
            # this likely needs 'rb' — confirm against the writer side.
            with open(f'/tmp/samshpopt/training_with_portsuffix_{port_suffix}.pkl', 'r') as f:
                result = pickle.load(f)
    else:
        # Launched by torch.distributed: run this rank directly.
        spawn_process(None, -1, None, parsed_args, local_rank=args.local_rank)
| 22,694
| 44.209163
| 286
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/imagenet.py
|
from torchvision.datasets.imagenet import *
class ImageNet(ImageFolder):
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
    Copied from torchvision, besides warning below.
    Args:
        root (string): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
     Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    WARN::
        This is the same ImageNet class as in torchvision.datasets.imagenet, but it has the `ignore_archive` argument.
        This allows us to only copy the unzipped files before training.
    """
    def __init__(self, root, split='train', download=None, ignore_archive=False, **kwargs):
        # `download` is kept only for backward compatibility; the dataset can
        # no longer be fetched automatically.
        if download is True:
            msg = ("The dataset is no longer publicly accessible. You need to "
                   "download the archives externally and place them in the root "
                   "directory.")
            raise RuntimeError(msg)
        elif download is False:
            msg = ("The use of the download flag is deprecated, since the dataset "
                   "is no longer publicly accessible.")
            warnings.warn(msg, RuntimeWarning)
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))
        # When ignore_archive is set the caller guarantees the split folder is
        # already extracted, so no archive parsing happens.
        if not ignore_archive:
            self.parse_archives()
        wnid_to_classes = load_meta_file(self.root)[0]
        super(ImageNet, self).__init__(self.split_folder, **kwargs)
        self.root = root
        # Re-map ImageFolder's wnid-based classes to human-readable names.
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        self.class_to_idx = {cls: idx
                             for idx, clss in enumerate(self.classes)
                             for cls in clss}
    def parse_archives(self):
        # Extract devkit/train/val archives only if not already present.
        if not check_integrity(os.path.join(self.root, META_FILE)):
            parse_devkit_archive(self.root)
        if not os.path.isdir(self.split_folder):
            if self.split == 'train':
                parse_train_archive(self.root)
            elif self.split == 'val':
                parse_val_archive(self.root)
    @property
    def split_folder(self):
        # Directory holding the images of the selected split.
        return os.path.join(self.root, self.split)
    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
| 3,096
| 42.013889
| 118
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/resnet.py
|
# Original code: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# gamma is initialized ot 0 in the last BN of each residual block
import torch.nn as nn
import math
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with padding 1.

    Spatial size is preserved at stride 1; the bias is omitted because a
    batch-norm layer follows in the residual blocks.
    """
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """Residual block with two 3x3 convs, batch norm, and a skip connection.

    The second batch norm's scale is zero-initialized so the residual branch
    starts as a no-op and each block initially acts as an identity mapping.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        nn.init.zeros_(self.bn2.weight)  # residual branch contributes zero at init
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # optional shape-matching projection for the skip
        self.stride = stride

    def forward(self, x):
        # Skip path: identity, or a projection when shape changes.
        identity = x if self.downsample is None else self.downsample(x)
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        return self.relu(branch + identity)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (channel expansion 4).

    The final batch norm's scale is zero-initialized so the block starts as
    an identity mapping, stabilizing early training.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Reduce, process, then expand channels.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * Bottleneck.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * Bottleneck.expansion)
        nn.init.zeros_(self.bn3.weight)  # residual branch contributes zero at init
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # optional shape-matching projection for the skip
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return self.relu(y + shortcut)
class ResNet(nn.Module):
    """ResNet for CIFAR-style (3-stage) or ImageNet-style (4-stage) inputs.

    For `dataset` starting with 'cifar', `depth` must satisfy depth = 9n+2
    (bottleneck) or 6n+2 (basic). For 'imagenet', `depth` must be one of
    18/34/50/101/152/200 and `bottleneck` is ignored (block type is implied
    by depth).
    """
    def __init__(self, dataset, depth, num_classes, bottleneck=False):
        super(ResNet, self).__init__()
        self.dataset = dataset
        if self.dataset.startswith('cifar'):
            self.inplanes = 16
            print(bottleneck)
            # Blocks per stage derived from total depth.
            if bottleneck == True:
                n = int((depth - 2) / 9)
                block = Bottleneck
            else:
                n = int((depth - 2) / 6)
                block = BasicBlock
            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(self.inplanes)
            self.relu = nn.ReLU(inplace=True)
            self.layer1 = self._make_layer(block, 16, n)
            self.layer2 = self._make_layer(block, 32, n, stride=2)
            self.layer3 = self._make_layer(block, 64, n, stride=2)
            # self.avgpool = nn.AvgPool2d(8)
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(64 * block.expansion, num_classes)
        elif dataset == 'imagenet':
            blocks ={18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck}
            layers ={18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}
            assert layers[depth], 'invalid detph for ResNet (depth should be one of 18, 34, 50, 101, 152, and 200)'
            self.inplanes = 64
            # Standard ImageNet stem: strided 7x7 conv + 3x3 max pool.
            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
            self.bn1 = nn.BatchNorm2d(64)
            self.relu = nn.ReLU(inplace=True)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.layer1 = self._make_layer(blocks[depth], 64, layers[depth][0])
            self.layer2 = self._make_layer(blocks[depth], 128, layers[depth][1], stride=2)
            self.layer3 = self._make_layer(blocks[depth], 256, layers[depth][2], stride=2)
            self.layer4 = self._make_layer(blocks[depth], 512, layers[depth][3], stride=2)
            # self.avgpool = nn.AvgPool2d(7)
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(512 * blocks[depth].expansion, num_classes)
        # He-style init for convs; unit-scale/zero-shift for batch norms
        # (overrides the zero-init on the blocks' last BN set in the block
        # constructors for BatchNorm2d weights — note this re-fills them to 1).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Projection shortcut when the spatial or channel shape changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        if self.dataset == 'cifar10' or self.dataset == 'cifar100':
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.avgpool(x)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
        elif self.dataset == 'imagenet':
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.maxpool(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
            x = self.avgpool(x)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
        return x
| 6,492
| 34.288043
| 135
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/mlp.py
|
import torch
from torch import nn
def MLP(D_out, in_dims, adaptive_dropouter_creator):
    """Build a small tanh MLP: flatten -> 300 -> 100 -> D_out.

    `in_dims` is the per-example input shape (flattened internally). If
    `adaptive_dropouter_creator` is given, its module (built for the 100-unit
    hidden layer) is inserted after the second Linear and exposed on the
    returned model as `model.adaptive_dropouters`; otherwise an Identity is
    used and the list is empty.
    """
    print('adaptive dropouter', adaptive_dropouter_creator)
    flat_features = 1
    for size in in_dims:
        flat_features = flat_features * size
    if adaptive_dropouter_creator is None:
        dropper = None
        hidden_dropout = nn.Identity()
    else:
        dropper = adaptive_dropouter_creator(100)
        hidden_dropout = dropper
    model = nn.Sequential(
        nn.Flatten(),
        nn.Linear(flat_features, 300),
        nn.Tanh(),
        nn.Linear(300, 100),
        hidden_dropout,
        nn.Tanh(),
        nn.Linear(100, D_out),
    )
    model.adaptive_dropouters = [] if dropper is None else [dropper]
    return model
| 616
| 28.380952
| 101
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/__init__.py
|
import torch
from torch import nn
from torch.nn import DataParallel
import torch.backends.cudnn as cudnn
# from torchvision import models
from DeepAA_evaluate.networks.resnet import ResNet
from DeepAA_evaluate.networks.shakeshake.shake_resnet import ShakeResNet
from DeepAA_evaluate.networks.wideresnet import WideResNet
from DeepAA_evaluate.networks.shakeshake.shake_resnext import ShakeResNeXt
from DeepAA_evaluate.networks.convnet import SeqConvNet
from DeepAA_evaluate.networks.mlp import MLP
from DeepAA_evaluate.common import apply_weightnorm
# example usage get_model(
def get_model(conf, bs, num_class=10, writer=None):
    """Instantiate the network named by conf['type'].

    `conf` may carry extra options (dropout, groupnorm, examplewise_bn,
    virtual_bn, weight_norm). `bs` and `writer` are accepted but not used in
    the visible construction paths. Raises NameError for unknown model names.
    Side effect: enables cudnn.benchmark globally.
    """
    name = conf['type']
    # Adaptive dropout creators are disabled here (both None).
    ad_creators = (None,None)
    if name == 'resnet50':
        model = ResNet(dataset='imagenet', depth=50, num_classes=num_class, bottleneck=True)
    elif name == 'resnet200':
        model = ResNet(dataset='imagenet', depth=200, num_classes=num_class, bottleneck=True)
    elif name == 'resnet18':
        model = ResNet(dataset='imagenet', depth=18, num_classes=num_class, bottleneck=False)
    elif name == 'wresnet40_2':
        model = WideResNet(40, 2, dropout_rate=conf.get('dropout',0.0), num_classes=num_class, adaptive_dropouter_creator=ad_creators[0],adaptive_conv_dropouter_creator=ad_creators[1], groupnorm=conf.get('groupnorm', False), examplewise_bn=conf.get('examplewise_bn', False), virtual_bn=conf.get('virtual_bn', False))
    elif name == 'wresnet28_10':
        model = WideResNet(28, 10, dropout_rate=conf.get('dropout',0.0), num_classes=num_class, adaptive_dropouter_creator=ad_creators[0],adaptive_conv_dropouter_creator=ad_creators[1], groupnorm=conf.get('groupnorm',False), examplewise_bn=conf.get('examplewise_bn', False), virtual_bn=conf.get('virtual_bn', False))
    elif name == 'wresnet28_2':
        model = WideResNet(28, 2, dropout_rate=conf.get('dropout', 0.0), num_classes=num_class,
                           adaptive_dropouter_creator=ad_creators[0], adaptive_conv_dropouter_creator=ad_creators[1],
                           groupnorm=conf.get('groupnorm', False), examplewise_bn=conf.get('examplewise_bn', False),
                           virtual_bn=conf.get('virtual_bn', False))
    elif name == 'miniconvnet':
        model = SeqConvNet(num_class,adaptive_dropout_creator=ad_creators[0],batch_norm=False)
    elif name == 'mlp':
        model = MLP(num_class, (3,32,32), adaptive_dropouter_creator=ad_creators[0])
    elif name == 'shakeshake26_2x96d':
        model = ShakeResNet(26, 96, num_class)
    elif name == 'shakeshake26_2x112d':
        model = ShakeResNet(26, 112, num_class)
    elif name == 'shakeshake26_2x96d_next':
        model = ShakeResNeXt(26, 96, 4, num_class)
    else:
        raise NameError('no model named, %s' % name)
    if conf.get('weight_norm', False):
        print('Using weight norm.')
        apply_weightnorm(model)
    # Device placement / DataParallel wrapping is left to the caller.
    #model = model.cuda()
    #model = DataParallel(model)
    cudnn.benchmark = True
    return model
def num_class(dataset):
    """Return the number of target classes for a dataset identifier.

    Raises KeyError when the dataset name is unknown.
    """
    class_counts = {
        'cifar10': 10,
        'noised_cifar10': 10,
        'targetnoised_cifar10': 10,
        'reduced_cifar10': 10,
        'cifar10.1': 10,
        'pre_transform_cifar10': 10,
        'cifar100': 100,
        'pre_transform_cifar100': 100,
        'fiftyexample_cifar100': 100,
        'tenclass_cifar100': 10,
        'svhn': 10,
        'svhncore': 10,
        'reduced_svhn': 10,
        'imagenet': 1000,
        'smallwidth_imagenet': 1000,
        'ohl_pipeline_imagenet': 1000,
        'reduced_imagenet': 120,
    }
    return class_counts[dataset]
| 3,545
| 42.243902
| 316
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/convnet.py
|
import torch
from torch import nn
class SeqConvNet(nn.Module):
    """LeNet-style CNN: two conv(5x5)+ReLU+maxpool stages, two FC+ReLU layers,
    an optional adaptive dropout and batch norm, then a linear classifier.

    `fixed_dropout`, when not None, inserts Dropout(p=fixed_dropout) after
    each conv and FC stage. `adaptive_dropout_creator`, when given, builds a
    dropout over the last hidden layer, applied just before the (optional)
    batch norm.
    """

    def __init__(self, D_out, fixed_dropout=None, in_channels=3, channels=(64, 64), h_dims=(200, 100),
                 adaptive_dropout_creator=None, batch_norm=False):
        super().__init__()
        print("Using SeqConvNet")
        assert len(channels) == 2 == len(h_dims)
        # Parameterized layers are created first, in a fixed order, so weight
        # initialization is reproducible under a fixed RNG seed.
        convs = [nn.Conv2d(in_channels, channels[0], 5), nn.Conv2d(channels[0], channels[1], 5)]
        fcs = [nn.Linear(channels[1] * 5 * 5, h_dims[0]), nn.Linear(h_dims[0], h_dims[1])]
        self.final_fc = nn.Linear(h_dims[1], D_out)
        conv_seq = []
        for conv in convs:
            conv_seq.extend([conv, torch.nn.ReLU(inplace=False), nn.MaxPool2d(2, 2)])
            if fixed_dropout is not None:
                conv_seq.append(torch.nn.Dropout(p=fixed_dropout))
        self.conv_blocks = nn.Sequential(*conv_seq)
        self.bn = nn.BatchNorm1d(h_dims[1], momentum=.9) if batch_norm else nn.Identity()
        fc_seq = []
        for fc in fcs:
            fc_seq.extend([fc, torch.nn.ReLU(inplace=False)])
            if fixed_dropout is not None:
                fc_seq.append(torch.nn.Dropout(p=fixed_dropout))
        self.fc_blocks = nn.Sequential(*fc_seq)
        self.adaptive_dropouters = [adaptive_dropout_creator(h_dims[1])] if adaptive_dropout_creator is not None else []

    def forward(self, x):
        features = self.conv_blocks(x)
        features = torch.nn.Flatten()(features)
        features = self.fc_blocks(features)
        if self.adaptive_dropouters:
            features = self.adaptive_dropouters[0](features)
        features = self.bn(features)
        return self.final_fc(features)
| 1,546
| 47.34375
| 143
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/wideresnet.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
_bn_momentum = 0.1
CpG = 8
class ExampleWiseBatchNorm2d(nn.BatchNorm2d):
    """BatchNorm2d variant: forward values use batch statistics, but gradients
    flow only through each example's own per-sample statistics.

    In training, normalization uses `local + (batch - local).detach()`, which
    numerically equals the batch statistic while its gradient is that of the
    per-example statistic. Running stats are updated as in standard BN.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1,
                 affine=True, track_running_stats=True):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
    def forward(self, input):
        self._check_input_dim(input)
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        # calculate running estimates
        if self.training:
            mean = input.mean([0, 2, 3])
            # use biased var in train
            var = input.var([0, 2, 3], unbiased=False)
            n = input.numel() / input.size(1)
            with torch.no_grad():
                self.running_mean = exponential_average_factor * mean\
                                    + (1 - exponential_average_factor) * self.running_mean
                # update running_var with unbiased var
                self.running_var = exponential_average_factor * var * n / (n - 1)\
                                   + (1 - exponential_average_factor) * self.running_var
            # Per-example stats over (H, W); value snapped to batch stats via detach.
            local_means = input.mean([2, 3])
            local_global_means = local_means + (mean.unsqueeze(0) - local_means).detach()
            local_vars = input.var([2, 3], unbiased=False)
            local_global_vars = local_vars + (var.unsqueeze(0) - local_vars).detach()
            input = (input - local_global_means[:,:,None,None]) / (torch.sqrt(local_global_vars[:,:,None,None] + self.eps))
        else:
            # Eval: standard normalization with running statistics.
            mean = self.running_mean
            var = self.running_var
            input = (input - mean[None, :, None, None]) / (torch.sqrt(var[None, :, None, None] + self.eps))
        if self.affine:
            input = input * self.weight[None, :, None, None] + self.bias[None, :, None, None]
        return input
class VirtualBatchNorm2d(nn.BatchNorm2d):
    """BatchNorm2d variant that detaches the batch statistics in training.

    The normalization values match standard BN, but no gradient flows through
    the mean/variance terms (only through the raw input and affine params).
    Running statistics are maintained exactly as in standard BN.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1,
                 affine=True, track_running_stats=True):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
    def forward(self, input):
        self._check_input_dim(input)
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        # calculate running estimates
        if self.training:
            mean = input.mean([0, 2, 3])
            # use biased var in train
            var = input.var([0, 2, 3], unbiased=False)
            n = input.numel() / input.size(1)
            with torch.no_grad():
                self.running_mean = exponential_average_factor * mean \
                                    + (1 - exponential_average_factor) * self.running_mean
                # update running_var with unbiased var
                self.running_var = exponential_average_factor * var * n / (n - 1) \
                                   + (1 - exponential_average_factor) * self.running_var
            # detach(): stop gradient through the batch statistics.
            input = (input - mean.detach()[None, :, None, None]) / (torch.sqrt(var.detach()[None, :, None, None] + self.eps))
        else:
            mean = self.running_mean
            var = self.running_var
            input = (input - mean[None, :, None, None]) / (torch.sqrt(var[None, :, None, None] + self.eps))
        if self.affine:
            input = input * self.weight[None, :, None, None] + self.bias[None, :, None, None]
        return input
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and a bias term (WideResNet flavor)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
def conv_init(m):
    """Initializer intended for `model.apply(conv_init)`.

    Conv layers get Xavier-uniform weights with gain sqrt(2) and zero bias;
    batch-norm layers get unit scale and zero shift. Matching is done by
    class-name substring so Conv1d/2d/3d and BatchNorm1d/2d/3d are all
    covered. Layers created with `bias=False` or `affine=False` have None
    parameters, which the original code crashed on — guard against that.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        # bias is None for conv layers constructed with bias=False
        if m.bias is not None:
            init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        # weight/bias are None for batch norms constructed with affine=False
        if m.weight is not None:
            init.constant_(m.weight, 1)
        if m.bias is not None:
            init.constant_(m.bias, 0)
class WideBasic(nn.Module):
    """Pre-activation wide-resnet block: norm-ReLU-conv, dropout, norm-ReLU-conv,
    plus a 1x1-conv shortcut when the shape changes.

    `norm_creator` supplies the normalization layer (BN/GroupNorm/variants);
    `adaptive_dropouter_creator`, when given, replaces the fixed-rate dropout.
    """
    def __init__(self, in_planes, planes, dropout_rate, norm_creator, stride=1, adaptive_dropouter_creator=None):
        super(WideBasic, self).__init__()
        self.bn1 = norm_creator(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        if adaptive_dropouter_creator is None:
            self.dropout = nn.Dropout(p=dropout_rate)
        else:
            # presumably (channels, kernel, stride, padding) — TODO confirm
            # against the adaptive dropouter implementation.
            self.dropout = adaptive_dropouter_creator(planes, 3, stride, 1)
        self.bn2 = norm_creator(planes)
        # Second conv carries the stride for downsampling.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
    def forward(self, x):
        out = self.dropout(self.conv1(F.relu(self.bn1(x))))
        out = self.conv2(F.relu(self.bn2(out)))
        out += self.shortcut(x)
        return out
class WideResNet(nn.Module):
    """WRN-depth-k with pluggable normalization and adaptive-dropout hooks.

    `depth` must satisfy depth = 6n+4; `widen_factor` k scales the channel
    widths (16k/32k/64k). `adaptive_dropouter_creator` builds a dropout
    applied just before the final linear layer; `adaptive_conv_dropouter_creator`
    is used inside the first block of each stage. At most one of
    groupnorm / examplewise_bn / virtual_bn may be set.
    """
    def __init__(self, depth, widen_factor, dropout_rate, num_classes, adaptive_dropouter_creator, adaptive_conv_dropouter_creator, groupnorm, examplewise_bn, virtual_bn):
        super(WideResNet, self).__init__()
        self.in_planes = 16
        self.adaptive_conv_dropouter_creator = adaptive_conv_dropouter_creator
        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        assert sum([groupnorm,examplewise_bn,virtual_bn]) <= 1
        n = int((depth - 4) / 6)
        k = widen_factor
        nStages = [16, 16*k, 32*k, 64*k]
        # Plain list (not ModuleList) so entries are not registered as submodules.
        self.adaptive_dropouters = [] #nn.ModuleList()
        if groupnorm:
            print('Uses group norm.')
            self.norm_creator = lambda c: nn.GroupNorm(max(c//CpG, 1), c)
        elif examplewise_bn:
            print("Uses Example Wise BN")
            self.norm_creator = lambda c: ExampleWiseBatchNorm2d(c, momentum=_bn_momentum)
        elif virtual_bn:
            print("Uses Virtual BN")
            self.norm_creator = lambda c: VirtualBatchNorm2d(c, momentum=_bn_momentum)
        else:
            self.norm_creator = lambda c: nn.BatchNorm2d(c, momentum=_bn_momentum)
        self.conv1 = conv3x3(3, nStages[0])
        self.layer1 = self._wide_layer(WideBasic, nStages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(WideBasic, nStages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(WideBasic, nStages[3], n, dropout_rate, stride=2)
        self.bn1 = self.norm_creator(nStages[3])
        self.linear = nn.Linear(nStages[3], num_classes)
        if adaptive_dropouter_creator is not None:
            last_dropout = adaptive_dropouter_creator(nStages[3])
        else:
            last_dropout = lambda x: x
        self.adaptive_dropouters.append(last_dropout)
        # self.apply(conv_init)
    def to(self, *args, **kwargs):
        # Adaptive dropouters are not registered submodules, so move them manually.
        super().to(*args,**kwargs)
        print(*args)
        for ad in self.adaptive_dropouters:
            if hasattr(ad,'to'):
                ad.to(*args,**kwargs)
        return self
    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        # First block of the stage carries the stride (and the adaptive
        # conv dropouter, if configured); the rest are stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for i,stride in enumerate(strides):
            ada_conv_drop_c = self.adaptive_conv_dropouter_creator if i == 0 else None
            new_block = block(self.in_planes, planes, dropout_rate, self.norm_creator, stride, adaptive_dropouter_creator=ada_conv_drop_c)
            layers.append(new_block)
            if ada_conv_drop_c is not None:
                self.adaptive_dropouters.append(new_block.dropout)
            self.in_planes = planes
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        # out = F.avg_pool2d(out, 8)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = out.view(out.size(0), -1)
        out = self.adaptive_dropouters[-1](out)
        out = self.linear(out)
        return out
| 8,885
| 39.949309
| 171
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/shakeshake/shake_resnet.py
|
# -*- coding: utf-8 -*-
import math
import torch.nn as nn
import torch.nn.functional as F
from DeepAA_evaluate.networks.shakeshake.shakeshake import ShakeShake
from DeepAA_evaluate.networks.shakeshake.shakeshake import Shortcut
class ShakeBlock(nn.Module):
    """Shake-shake residual block: two identical ReLU-conv-BN branches mixed
    by the ShakeShake autograd function, plus an identity (or Shortcut) skip.
    """
    def __init__(self, in_ch, out_ch, stride=1):
        super(ShakeBlock, self).__init__()
        self.equal_io = in_ch == out_ch
        if self.equal_io:
            # NOTE(review): a lambda is not a registered submodule; harmless
            # here since the identity shortcut has no parameters.
            self.shortcut = lambda x: x
        else:
            self.shortcut = Shortcut(in_ch, out_ch, stride=stride)
        #self.shortcut = self.equal_io and None or Shortcut(in_ch, out_ch, stride=stride)
        self.branch1 = self._make_branch(in_ch, out_ch, stride)
        self.branch2 = self._make_branch(in_ch, out_ch, stride)
    def forward(self, x):
        h1 = self.branch1(x)
        h2 = self.branch2(x)
        # Randomly weighted mix of the two branches (see ShakeShake).
        h = ShakeShake.apply(h1, h2, self.training)
        #h0 = x if self.equal_io else self.shortcut(x)
        h0 = self.shortcut(x)
        return h + h0
    def _make_branch(self, in_ch, out_ch, stride=1):
        # ReLU-conv-BN twice; the first conv carries the stride.
        return nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1, bias=False),
            nn.BatchNorm2d(out_ch))
class ShakeResNet(nn.Module):
    """Shake-shake ResNet with three stages of ShakeBlocks.

    `depth` determines blocks per stage ((depth-2)/6); `w_base` is the base
    width of the first stage; `label` is the number of output classes.
    """
    def __init__(self, depth, w_base, label):
        super(ShakeResNet, self).__init__()
        # NOTE(review): float division; only the int() in _make_layer makes
        # this work — depth is expected to satisfy depth = 6n+2.
        n_units = (depth - 2) / 6
        in_chs = [16, w_base, w_base * 2, w_base * 4]
        self.in_chs = in_chs
        self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
        self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
        self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
        self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
        self.fc_out = nn.Linear(in_chs[3], label)
        # Initialize paramters
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        h = self.c_in(x)
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        h = F.relu(h)
        # assumes an 8x8 feature map here (e.g. 32x32 input) — TODO confirm
        h = F.avg_pool2d(h, 8)
        h = h.view(-1, self.in_chs[3])
        h = self.fc_out(h)
        return h
    def _make_layer(self, n_units, in_ch, out_ch, stride=1):
        # Only the first block of a stage downsamples / changes width.
        layers = []
        for i in range(int(n_units)):
            layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
            in_ch, stride = out_ch, 1
        return nn.Sequential(*layers)
| 2,927
| 32.655172
| 89
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/shakeshake/shake_resnext.py
|
# -*- coding: utf-8 -*-
import math
import torch.nn as nn
import torch.nn.functional as F
from DeepAA_evaluate.networks.shakeshake.shakeshake import ShakeShake
from DeepAA_evaluate.networks.shakeshake.shakeshake import Shortcut
class ShakeBottleNeck(nn.Module):
    """Shake-shake ResNeXt bottleneck: two identical grouped-conv branches
    (1x1 reduce, 3x3 grouped, 1x1 expand) mixed by ShakeShake, plus a skip.
    """
    def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
        super(ShakeBottleNeck, self).__init__()
        self.equal_io = in_ch == out_ch
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch, stride=stride)
        self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)
        self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)
    def forward(self, x):
        h1 = self.branch1(x)
        h2 = self.branch2(x)
        # Randomly weighted mix of the two branches (see ShakeShake).
        h = ShakeShake.apply(h1, h2, self.training)
        h0 = x if self.equal_io else self.shortcut(x)
        return h + h0
    def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
        # conv-BN-ReLU x2 then conv-BN; the 3x3 conv is grouped (ResNeXt) and
        # carries the stride.
        return nn.Sequential(
            nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=cardinary, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
            nn.BatchNorm2d(out_ch))
class ShakeResNeXt(nn.Module):
    """Shake-shake ResNeXt with three stages of ShakeBottleNeck blocks.

    `depth` determines blocks per stage ((depth-2)//9); `w_base` scales the
    bottleneck width; `cardinary` is the grouped-conv cardinality; `label`
    is the number of output classes.
    """
    def __init__(self, depth, w_base, cardinary, label):
        super(ShakeResNeXt, self).__init__()
        n_units = (depth - 2) // 9
        n_chs = [64, 128, 256, 1024]
        self.n_chs = n_chs
        self.in_ch = n_chs[0]
        self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
        self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
        self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
        self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
        self.fc_out = nn.Linear(n_chs[3], label)
        # Initialize paramters
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        h = self.c_in(x)
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        h = F.relu(h)
        # assumes an 8x8 feature map here (e.g. 32x32 input) — TODO confirm
        h = F.avg_pool2d(h, 8)
        h = h.view(-1, self.n_chs[3])
        h = self.fc_out(h)
        return h
    def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
        # Only the first block of a stage downsamples / changes width.
        layers = []
        mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
        for i in range(n_units):
            layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch, cardinary, stride=stride))
            self.in_ch, stride = out_ch, 1
        return nn.Sequential(*layers)
| 3,094
| 35.411765
| 97
|
py
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/shakeshake/__init__.py
| 0
| 0
| 0
|
py
|
|
DeepAA
|
DeepAA-master/DeepAA_evaluate/networks/shakeshake/shakeshake.py
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
    """Shake-shake regularization (Gastaldi, 2017).

    Forward mixes two branch outputs with a per-sample random alpha during
    training (plain 0.5/0.5 average at eval); backward uses an independent
    per-sample random beta — the decorrelated forward/backward weights are
    the shake-shake trick.

    Fixed: the original drew random weights with `torch.cuda.FloatTensor`,
    which is deprecated and crashed on CPU tensors; we now draw on the
    inputs' own device/dtype. The deprecated `Variable` wrapper (a no-op
    since PyTorch 0.4) is also dropped.
    """
    @staticmethod
    def forward(ctx, x1, x2, training=True):
        if training:
            # One random mixing weight per sample, broadcast over C/H/W.
            alpha = torch.rand(x1.size(0), dtype=x1.dtype, device=x1.device)
            alpha = alpha.view(-1, 1, 1, 1).expand_as(x1)
        else:
            alpha = 0.5
        return alpha * x1 + (1 - alpha) * x2
    @staticmethod
    def backward(ctx, grad_output):
        # Independent per-sample weight for the backward pass.
        beta = torch.rand(grad_output.size(0), dtype=grad_output.dtype, device=grad_output.device)
        beta = beta.view(-1, 1, 1, 1).expand_as(grad_output)
        # No gradient for the `training` flag.
        return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
    """Shake-shake downsampling shortcut.

    Concatenates two half-width 1x1-conv paths — the second sampled on a grid
    shifted by one pixel — then applies batch norm. This preserves more
    spatial information than a single strided 1x1 conv.
    """
    def __init__(self, in_ch, out_ch, stride):
        super(Shortcut, self).__init__()
        self.stride = stride
        self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(out_ch)
    def forward(self, x):
        h = F.relu(x)
        # Path 1: plain strided subsampling (1x1 avg pool == stride pick).
        h1 = F.avg_pool2d(h, 1, self.stride)
        h1 = self.conv1(h1)
        # Path 2: negative padding crops one pixel from top/left and pads one
        # on bottom/right, i.e. the same subsampling on a shifted grid.
        h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
        h2 = self.conv2(h2)
        h = torch.cat((h1, h2), 1)
        return self.bn(h)
| 1,413
| 27.857143
| 86
|
py
|
emrQA
|
emrQA-master/main.py
|
from subprocess import check_call
import sys
import os
import csv
PYTHON = sys.executable
#################################### set the full file paths ###############################################
i2b2_relations_challenge_directory = "i2b2/relations/"
i2b2_medications_challenge_directory = "i2b2/medication/"
i2b2_heart_disease_risk_challenge_directory = "i2b2/heart-disease-risk/"
i2b2_obesity_challenge_directory = "i2b2/obesity/"
i2b2_smoking_challenge_directory = "i2b2/smoking/"
i2b2_coreference_challeneg_directory = "i2b2/coreference"
templates_directory = "templates/templates-all.csv"
#################################### make output directory if it does not already exist #########################
cwd = os.getcwd()
model_dir = "output/"
if not os.path.exists(os.path.join(cwd,model_dir)):
    os.makedirs(model_dir)
output_directory = os.path.join(cwd,model_dir) ## you can modify this to change the output directory path ##
###########################################################################################################
# Rewrite matching_notes.csv once so its entries carry the full challenge-
# directory prefixes; `flag` marks that the rewrite has already happened.
matching_notes = os.path.join("generation/i2b2_relations/", "matching_notes.csv")
match_file = open(matching_notes)
csvreader = csv.reader(match_file)
matching_files = list(csvreader) # relation, coreference
new_file = []
new_file.append(matching_files[0])
flag = 0
for file in matching_files[1:]:
    if i2b2_relations_challenge_directory in file[0]:
        flag = 1
        break
    new_file.append([os.path.join(i2b2_relations_challenge_directory,file[0]),os.path.join(i2b2_coreference_challeneg_directory,file[1])])
if flag == 0:
    # NOTE(review): the file is read with the default comma delimiter but
    # rewritten tab-delimited; a subsequent read with csv.reader's defaults
    # would see single-column rows — confirm the intended delimiter.
    ofile = open(matching_notes, "w")
    filewriter = csv.writer(ofile, delimiter="\t")
    for val in new_file:
        filewriter.writerow(val)
    ofile.close()
################################### run the generation scripts #######################################
cmd = "{python} generation/i2b2_medications/medication-answers.py --i2b2_dir={i2b2_dir} --templates_dir={templates_dir} --output_dir={output_dir}".format(python=PYTHON, i2b2_dir=i2b2_medications_challenge_directory, templates_dir=templates_directory, output_dir=output_directory)
print(cmd)
check_call(cmd, shell=True)
cmd = "{python} generation/i2b2_relations/relations-answers.py --i2b2_dir={i2b2_dir} --templates_dir={templates_dir} --output_dir={output_dir}".format(python=PYTHON, i2b2_dir=i2b2_relations_challenge_directory, templates_dir=templates_directory, output_dir=output_directory)
print(cmd)
check_call(cmd, shell=True)
cmd = "{python} generation/i2b2_heart_disease_risk/risk-answers.py --i2b2_dir={i2b2_dir} --templates_dir={templates_dir} --output_dir={output_dir}".format(python=PYTHON, i2b2_dir=i2b2_heart_disease_risk_challenge_directory, templates_dir=templates_directory, output_dir=output_directory)
print(cmd)
check_call(cmd, shell=True)
cmd = "{python} generation/i2b2_smoking/smoking-answers.py --i2b2_dir={i2b2_dir} --templates_dir={templates_dir} --output_dir={output_dir}".format(python=PYTHON, i2b2_dir=i2b2_smoking_challenge_directory, templates_dir=templates_directory, output_dir=output_directory)
print(cmd)
check_call(cmd, shell=True)
cmd = "{python} generation/i2b2_obesity/obesity-answers.py --i2b2_dir={i2b2_dir} --templates_dir={templates_dir} --output_dir={output_dir}".format(python=PYTHON, i2b2_dir=i2b2_obesity_challenge_directory, templates_dir=templates_directory, output_dir=output_directory)
print(cmd)
check_call(cmd, shell=True)
################## combine all the output files and generate the output in normal format ####################
cmd = "{python} generation/combine_data/combine_answers.py --output_dir={output_dir}".format(python=PYTHON, output_dir=output_directory)
print(cmd)
check_call(cmd, shell=True)
##################### convert normal output to squad format ##################################
######################### basic analysis of the dataset #######################################
'''
cmd = "{python} evaluation/analysis.py".format(python=PYTHON)
print(cmd)
check_call(cmd, shell=True)
'''
| 4,047
| 42.06383
| 287
|
py
|
emrQA
|
emrQA-master/evaluation/template-analysis.py
|
import json
import csv
import os
import numpy as np
import collections
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--templates_dir', default='/home/anusri/Desktop/emrQA/templates', help='Directory containing template files in the given format')
args = parser.parse_args()

# Vocabulary of the logical-form language used by the templates:
# relation names, helper functions, entity attributes and pre-defined values.
relations = ["reveals", "relates","causes","given","conducted","improves","worsens"]
Functions = ["CheckRange","CheckIfNull","sortBy"]
attributes = ["date","result","onsetdate","startdate","QuitDate","PackPerDay","status","abnormalResultFlag","adherence","enddate","IsTobaccoUser","sig",
              "YearsOfUse","diagnosisdate","dosage"]
attribute_values_defined = ["pending","currentDate"]

csv_reader = list(csv.reader(open(os.path.join(args.templates_dir,"templates-all.csv"))))

# "yes" => analyze only logical forms that have an answer column; "no" => all.
answer = "no"
question_lforms = []
for line in csv_reader[1:]:
    dataset = line[0]
    # The answer-type column index differs for the relations templates.
    if dataset == "relations":
        check = line[5]
    else:
        check = line[4]
    ## analyze all logical forms or only the ones used with answers,
    if answer == "yes":
        if check != "none":
            if (line[2],line[3]) not in question_lforms:
                question_lforms.append((line[2],line[3]))
    else:
        if (line[2],line[3]) not in question_lforms:
            question_lforms.append((line[2],line[3]))
########################################################################################################
# Collect the unique logical forms (tabs stripped, |medication| normalized to
# |treatment|).  NOTE(review): membership is tested against the raw form but
# the transformed form is appended — two raw forms can map to one transformed
# form and still both be appended; verify this is intended.
lforms = []
for (question_list,lform) in question_lforms:
    #print(lform)
    if lform not in lforms:
        lforms.append(lform.replace("\t", "").replace("|medication|","|treatment|"))
##########################################################################################################
#print(len(lforms))
# Tokenize every logical form (padding punctuation with spaces, dropping the
# digits used to disambiguate repeated placeholders) and sanity-check that
# brackets are balanced.
lform_vocab = []
for lform in lforms:
    lform = lform.replace("-"," - ").replace("1","").replace("2","").replace("/"," / ").replace("<"," < ").replace(">"," > ").replace("("," ( ").replace(")"," ) ").replace("["," [ ").replace("]"," ] ").replace("{"," { ").replace("}"," } ").replace("="," = ").replace(",", " , ")
    if lform.count("(") != lform.count(")"):
        print("(")
        print(lform)
    if lform.count("{") != lform.count("}"):
        print("{")
        print(lform)
    if lform.count("[") != lform.count("]"):
        print('[')
        print(lform)
    tokens = [tok for tok in lform.split(" ") if tok != ""]
    lform_vocab += tokens
vocab_counter = collections.Counter(lform_vocab)
# Buckets for categorizing the logical-form vocabulary.
# Fix: the original initialized Events/arthemetic/brackets/arguments twice and
# also rebound `Functions = []`, clobbering the helper-function list declared
# at the top of the script; the duplicate/clobbering assignments are removed so
# the `relations + Functions + ...` filter below matches helper functions as
# intended (tokens such as "CheckRange" previously fell through to `else: pass`,
# so the final bucket contents are unchanged).
Events = []
arguments = []
arthemetic = []
brackets = []
punctuations = []
attribute_values = []
Event_Combination = []
Relations_COmbination = []
# Bucket every vocabulary token by its syntactic role in the logical form.
for vocab in vocab_counter:
    if "Event" in vocab:
        Events.append(vocab)
    elif vocab in relations + Functions + attributes + attribute_values_defined:
        pass
    elif "." in vocab:
        attribute_values.append(vocab)
    elif vocab in [">","<","=","Y","N","x","-"]:
        arthemetic.append(vocab)
    elif vocab in ["OR", "AND"]:
        Event_Combination.append(vocab)
    elif vocab in ["/"]:
        Relations_COmbination.append(vocab)
    elif vocab in ["(",")","[","]","{","}"]:
        brackets.append(vocab)
    elif "|" in vocab:
        arguments.append(vocab)
    elif "," in vocab:
        punctuations.append(vocab)
    else:
        pass
# Per-question-category accumulators.  NOTE(review): several of these
# (medical_domain_qs, date_questions, time_questions, ...) are never used
# below, and `property` shadows the Python builtin.
arthemetic_questions = []
question_with_relation = []
medical_domain_qs = []
date_questions = []
time_questions = []
trend_question = []
events_used = {}
multiple_events = []
Lab_Questions = []
arthmetic_questions = []
indefinite_evidence = []
event_confirmation = []
current = []
property = 0.0
past = []
more_than_one = 0.0
attribute_questions = 0.0
event_questions = 0.0
medical_queston = 0.0
for event in Events:
    events_used[event] = 0
# Classify every logical form: coarse event questions "( x )", fine attribute
# questions "=", medical-vocabulary questions ".", arithmetic questions, and
# relation vs. event-property vs. indefinite-evidence questions.
for lform in lforms:
    #print(lform)
    lform = lform.replace("-", " - ").replace("1", "").replace("2", "").replace("/", " / ").replace("<", " < ").replace(">", " > ").replace("(", " ( ").replace(")", " ) ").replace("[", " [ ").replace("]", " ] ").replace("{", " { ").replace("}", " } ").replace("=", " = ").replace(",", " , ")
    if "( x )" in lform:
        #print(lform)
        event_questions += 1
    if "= "in lform:
        #print(lform)
        attribute_questions += 1
    if "." in lform:
        #print(lform)
        medical_queston += 1
    tokens = [tok for tok in lform.split(" ") if tok != ""]
    rel = set(tokens).intersection(set(relations))
    if len(set(["CheckRange", ">", "<", ]).intersection(tokens)) != 0:
        #print(lform)
        arthemetic_questions.append(lform)
    if len(rel) == 0:
        if "[" not in tokens:
            indefinite_evidence.append(lform)
        else:
            out = list((set(Events)).intersection(set(tokens))) ## Event Property Questions
            for e in out:
                events_used[e] += 1
            property += 1
    else:
        question_with_relation.append(lform)
        if len(rel) > 0:
            more_than_one += 1
print("Arthemetic questions",len(arthemetic_questions)*100.0/len(lforms))
print("One or more than one relations", 100.0 * more_than_one/len(lforms))
print("Course Questions",100.0*event_questions/len(lforms))
print("Fine Questions",100.0*attribute_questions/len(lforms))
print("Medical Questions",100.0*medical_queston/len(lforms))
## medical
## corse
## fine
| 5,510
| 28.789189
| 278
|
py
|
emrQA
|
emrQA-master/evaluation/paraphrase-analysis.py
|
import csv
import os
import nltk
from nltk.metrics import *
from nltk.translate.bleu_score import sentence_bleu
import argparse
import itertools
import random
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--templates_dir', default='/home/anusri/Desktop/emrQA/templates', help='Directory containing template files in the given format')
args = parser.parse_args()
# Rows of templates-all.csv: column 2 holds "##"-joined paraphrase questions,
# column 3 holds the logical form.
csv_reader = list(csv.reader(open(os.path.join(args.templates_dir,"templates-all.csv"))))
def scoring_method(qtuple, method):
    """Score the similarity of a pair of paraphrase questions.

    :param qtuple: (question_a, question_b) pair of raw question strings
    :param method: "jaccard_score" or "blue_score"
    :return: pair score (NOTE: jaccard_distance is a distance, 0 == identical)
    :raises ValueError: if *method* is not a known scoring method
    """
    if method == "jaccard_score":
        set1 = set(nltk.word_tokenize(qtuple[0]))
        set2 = set(nltk.word_tokenize(qtuple[1]))
        return jaccard_distance(set1, set2)
    if method == "blue_score":
        (reference, candidate) = qtuple
        # Bug fix: sentence_bleu expects a list of tokenized reference
        # sentences and a tokenized hypothesis; passing raw strings made NLTK
        # compute BLEU over characters instead of words.
        return sentence_bleu([nltk.word_tokenize(reference)], nltk.word_tokenize(candidate))
    # Previously an unknown method fell through and raised a confusing
    # NameError on the unbound local `score`.
    raise ValueError("unknown scoring method: %s" % method)
if __name__=="__main__":
    # Scoring method for paraphrase similarity; see scoring_method().
    method = "blue_score"
    #method = "jaccard_score"
    unique_logical_forms = []
    total_questions = []
    total_scores = []
    for line in csv_reader[1:]:
        question = line[2].strip()
        logical_form = line[3].strip()
        # Collapse repeated placeholders so paraphrases compare cleanly.
        question = question.replace("|medication| or |medication|", "|medication|")
        question = question.replace("|problem| or |problem|", "|problem|")
        question = question.replace("|test| or |test|", "|test|")
        question = question.replace("|test| |test| |test|", "|test|")
        question = question.replace("\t", "")
        logical_form = logical_form.replace("\t", "").replace("|medication|","|treatment|")
        if logical_form not in unique_logical_forms:
            unique_logical_forms.append(logical_form)
        # Paraphrases of one question type are "##"-separated; score each
        # paraphrase against a randomly chosen pivot.
        paraphrase_questions = question.split("##")
        random.shuffle(paraphrase_questions)
        total_questions.extend(list(set(paraphrase_questions)))
        question_tuples = list(itertools.product([paraphrase_questions[0]], paraphrase_questions[1:]))
        scores = []
        for qtuple in question_tuples:
            if qtuple[0] == qtuple[1]:
                continue
            scoring_tuple = scoring_method(qtuple, method)
            scores.append(scoring_tuple)
        if len(scores) != 0:
            min_value = min(scores)
            max_value = max(scores)
            total_scores.extend(scores)
    ## total questions by total question types
    print("Average paraphrases per question", len(total_questions)*1.0/len(unique_logical_forms))
    print("Average of "+ method+ " of paraphrases", np.mean(np.array(total_scores)))
    print("Standard deviation of " + method + " of paraphrases", np.std(np.array(total_scores)))
| 2,611
| 33.368421
| 150
|
py
|
emrQA
|
emrQA-master/evaluation/basic-stats.py
|
import json
from nltk.tokenize.stanford import StanfordTokenizer
import os
import numpy as np
import matplotlib.pyplot as plt
import nltk
from random import *
from nltk import sent_tokenize
from nltk import word_tokenize
import random
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', default='/home/anusri/Desktop/emrQA/output/', help='Directory to store the output')
args = parser.parse_args()
# Optional Stanford tokenizer setup, left disabled; NLTK tokenizers are used.
#os.environ['STANFORD_PARSER'] = '/home/anusri/Desktop/codes_submission/packages/stanford-jars/'
#os.environ['STANFORD_MODELS'] = '/home/anusri/Desktop/codes_submission/packages/stanford-jars'
#tokenizer = StanfordTokenizer("/home/anusri/Desktop/codes_submission/packages/stanford-jars/stanford-postagger.jar")
#from matplotlib2tikz import save as tikz_save
def LengthStatistics(list_values):
    """Compute count and average token length of a list of strings.

    :param list_values: list of strings (questions, evidences or notes)
    :return: (number of strings, average word tokens per string)
    """
    Total_values = len(list_values)
    # Fix: guard against empty input, which previously raised
    # ZeroDivisionError; also dropped the dead, unused `metrics` dict.
    if Total_values == 0:
        return (0, 0.0)
    Total_Tokens = 0.0
    for question in list_values:
        words = word_tokenize(question.strip())
        words = [word for word in words if word != ""]
        Total_Tokens += len(words)
    Avg_token_length = Total_Tokens / Total_values
    return (Total_values, Avg_token_length)
# NOTE(review): these three lists are never used below.
problem = []
treatments = []
tests = []

if __name__ == '__main__':
    data_file = os.path.join(args.output_dir,"data.json")
    datasets = json.load(open(data_file), encoding="latin-1")
    all_questions = []
    all_clinical_notes = []
    total_clinical_notes = 0
    # Histogram: number of distinct answers -> number of question types.
    number_of_answers_per_question = {}
    num_classes = 0.0
    classes = []
    total_evidences = []
    for dataset in datasets["data"]:
        print("Processing dataset",dataset["title"])
        for note in dataset["paragraphs"]:
            total_clinical_notes += 1
            # Skip duplicate notes (same context already counted).
            if " ".join(note["context"]) not in all_clinical_notes:
                all_clinical_notes.extend([" ".join(note["context"])])
            else:
                continue
            for questions in note["qas"]:
                all_answers = []
                evidences = []
                all_questions.append(list(set(questions["question"]))) # all questions
                for answer in questions["answers"]:
                    # obesity/smoking are classification datasets: answers are
                    # class labels, not spans in the note.
                    if dataset["title"] in ["obesity", "smoking"] :
                        #print(answer["text"])
                        classes.append(answer["text"])
                        continue
                        #for txt in answer["text"]:
                        #    if txt not in all_answers:
                        #        all_answers.append(txt)
                    else:
                        if answer["answer_start"][0] != "":
                            if answer["answer_start"] not in all_answers:
                                all_answers.append(answer["answer_start"]) ## all answers
                            #print(questions["question"][0], answer["answer_start"],answer["evidence"])
                            evidences.append(answer["evidence"])
                total_evidences.extend(evidences)
                ## distribution of evidences per question type
                ground_truth = all_answers
                total_answers = len(ground_truth)
                if total_answers not in number_of_answers_per_question:
                    number_of_answers_per_question[total_answers] = 0
                number_of_answers_per_question[total_answers] += 1
    print("Total Clinical Notes", total_clinical_notes, len(all_clinical_notes))
    total_question = len(all_questions)
    totals = 0
    questions_list = []
    for value in all_questions:
        totals += len(value)
        questions_list.extend(value)
    ## Average Question Length ##
    print("Total Number Of Questions",totals)
    print("Total number of question types", total_question)
    stats_questions = LengthStatistics(questions_list)
    print("Average question length",stats_questions[1])
    ## Average Evidence Length ##
    stats_evidences = LengthStatistics(total_evidences)
    print("Average evidence length",stats_evidences[1])
    ## Average Note Length ##
    stats_evidences = LengthStatistics(all_clinical_notes)
    print("Average clinical note length", stats_evidences[1])
    ## Average number of questions per note ##
    print("Average Number of questions per note", totals/total_clinical_notes)
    print("Average number of question types per note", total_question/total_clinical_notes)
    ## Average number of evidences per question ##
    total__num_answers = 0
    for value in number_of_answers_per_question:
        if value == 0:
            print(number_of_answers_per_question[value])
        else:
            total__num_answers += value*number_of_answers_per_question[value]
    num_classes = len(set(classes))
    print("Average number of evidences", float(total__num_answers) / total_question)
    print("Percentage with one evidences",number_of_answers_per_question[1]*100.0/total_question)
    print("range in number of evidences",min(number_of_answers_per_question.keys()),max(number_of_answers_per_question.keys()))
    print("total number of classes in obesity and smoking datasets", num_classes)
    ################# more stats ignore for now ######################
    # (stale commented-out debug/plotting code removed: indefinite-evidence
    # counts, matplotlib histograms of evidences-per-question, and extra
    # clinical-note length statistics)
| 6,503
| 34.156757
| 127
|
py
|
emrQA
|
emrQA-master/generation/i2b2_relations/problem_classfiers.py
|
from nltk.stem import WordNetLemmatizer
import nltk
from nltk.corpus import stopwords
## Open common names to use in is_common_noun function ##
## Load the common-noun filter list used by concept_is_CommonNoun. ##
# Any set of common nouns works; here it is the top-500 high-frequency words
# occurring in the templates.
file = open("generation/i2b2_relations/common_names.txt")
data = file.readlines()
file.close()
common_nouns = [line.strip() for line in data]
## Get stop words ##
stopWords = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
## Functions For Use ##
def concept_is_CommonNoun(concept):
    """Return 1 when every content word of *concept* is a common noun, else 0.

    A leading determiner or (possessive) pronoun is skipped; stop words are
    ignored everywhere.  An empty content-word list counts as not common.
    """
    tagged = nltk.pos_tag(nltk.word_tokenize(concept))
    [tokens, tags] = zip(*tagged)
    tokens = list(tokens)
    # Drop a leading "the"/"his"/... so only content words are checked.
    start = 0
    if tags[0] in ["DT", "PRP", "PRP$"]:
        tokens[0] = ""
        start = 1
    content_words = [tok for tok in tokens[start:] if tok not in stopWords]
    if not content_words:
        return 0
    for word in content_words:
        common = (lemmatizer.lemmatize(word) in common_nouns) or (word in common_nouns)
        if not common:
            return 0
    return 1
def concept_is_PastTense(concept):
    """Return 1 when the final token of *concept* is POS-tagged past tense."""
    tagged = nltk.pos_tag(nltk.word_tokenize(concept))
    # Only the last tag decides the tense of the phrase; VBD/VBN mark past.
    last = tagged[-1:]
    past_hits = [tok for tok in last if tok[1] in ["VBD", "VBN"]]
    return 1 if past_hits else 0
'''
import sys
sys.path.insert(0, '/home/anusri/Desktop/IBM/GetUMLS/QuickUMLS')
import quickumls
matcher = quickumls.QuickUMLS("/home/anusri/Desktop/IBM/GetUMLS/installation")
## Get UMLS semantic mapping ##
sfile = open("/home/anusri/Desktop/IBM/GetUMLS/QuickUMLS/SemanticTypes_2013AA.txt")
data = sfile.readlines()
sfile.close()
mapping = {}
for line in data:
words = line.split("|")
short_type = words[1]
full_type = words[0]
mapping[short_type] = full_type
def concept_is_Disease(concept):
#if concept_is_CommonNoun(concept) == 1:
# return 0
SemanticTypes = CheckSemanticType(concept)
otype = disease
for (word,wtype) in SemanticTypes:
for type in wtype:
if (type in otype):
return 1
return 0
def concept_is_Symptom(concept):
# if concept_is_CommonNoun(concept) == 1:
# return 0
SemanticTypes = CheckSemanticType(concept)
for (word, wtype) in SemanticTypes:
for type in wtype:
if (type in symptoms):
return 1
return 0
def concept_is_MentalDisease(concept):
# if concept_is_CommonNoun(concept) == 1:
# return 0
SemanticTypes = CheckSemanticType(concept)
for (word, wtype) in SemanticTypes:
for type in wtype:
if (type in mental_disease):
return 1
return 0
def concept_is_VirusBacterium(concept):
# if concept_is_CommonNoun(concept) == 1:
# return 0
SemanticTypes = CheckSemanticType(concept)
for (word, wtype) in SemanticTypes:
for type in wtype:
if type in bacteria:
return 1
return 0
def concept_is_Injury(concept):
# if concept_is_CommonNoun(concept) == 1:
# return 0
SemanticTypes = CheckSemanticType(concept)
for (word, wtype) in SemanticTypes:
for type in wtype:
if (type in injury):
return 1
return 0
def concept_is_Abnormality(concept):
# if concept_is_CommonNoun(concept) == 1:
# return 0
SemanticTypes = CheckSemanticType(concept)
for (word, wtype) in SemanticTypes:
for type in wtype:
if (type in abnormality):
return 1
return 0
def concept_is_AbnormalTestResult(concept):
# if concept_is_CommonNoun(concept) == 1:
# return 0
SemanticTypes = CheckSemanticType(concept)
for (word, wtype) in SemanticTypes:
for type in wtype:
if (type in lab_result):
return 1
return 0
def CheckSemanticType(text):
types = []
out = matcher.match(text, best_match=True, ignore_syntax=False)
for words in out:
word = words[0]["ngram"]
temp = []
for type in list(words[0]["semtypes"]):
temp.append(mapping[type])
types.append((word,temp))
return types
## Functions for script check ##
#TenseFilter()
def determine_tense_input(sentance):
text = nltk.word_tokenize(sentance)
tagged = nltk.pos_tag(text)
tense = {}
tense["future"] = len([word for word in tagged[-1:] if word[1] == "MD"])
tense["present"] = len([word for word in tagged[-1:] if word[1] in ["VBP", "VBZ", "VBG"]])
tense["past"] = len([word for word in tagged[-1:] if word[1] in ["VBD", "VBN"]])
return tense
def TenseFilter():
file = open("problem-concept.txt")
data = file.readlines()
file.close()
concepts = [line.strip() for line in data]
past = []
future = []
for concept in concepts:
tense = determine_tense_input(concept)
if tense["past"] > 0:
past.append(concept)
if tense["future"] > 0:
future.append(concept)
#for word in past:
# term = word.strip().split(" ")
# if len(term) > 1:
# term = term[-1]
# else:
# term = term[0]
# print(term)
# print(word,en.verb.present(term))
print(past)
print(future)
#FilterCommonNouns()
'''
| 6,016
| 23.863636
| 199
|
py
|
emrQA
|
emrQA-master/generation/i2b2_relations/relations-answers.py
|
import csv
from os import listdir
from os.path import isfile, join
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
from problem_classfiers import concept_is_CommonNoun, concept_is_PastTense
import json
import sys
# Python 2 only: force ISO-8859-1 as the default string encoding so the i2b2
# note files decode without errors.
reload(sys)
sys.setdefaultencoding("ISO-8859-1")
import random
import argparse
import os

## Resolve the use of medications and treatments
parser = argparse.ArgumentParser()
parser.add_argument('--i2b2_dir', default='', help='Directory containing i2b2 relations challange files')
parser.add_argument('--templates_dir', default='', help='Directory containing template files in the given format')
parser.add_argument('--output_dir', default='', help='Directory to store the output')
args = parser.parse_args()

###################################################### SET FILE PATHS ##################################################################
## i2b2 file paths: relation (.rel), raw note (.txt) and assertion (.ast) dirs ##
relations_folder = args.i2b2_dir
FilePath = [ "concept_assertion_relation_training_data/partners/rel/", "concept_assertion_relation_training_data/beth/rel/", "test_data/rel/"]
RelationsFilePath = []
for file in FilePath:
    RelationsFilePath.append(os.path.join(relations_folder,file))
FilePath = ["concept_assertion_relation_training_data/partners/txt/", "concept_assertion_relation_training_data/beth/txt/","test_data/txt/"]
NoteFilePath = []
for file in FilePath:
    NoteFilePath.append(os.path.join(relations_folder,file))
FilePath = [ "concept_assertion_relation_training_data/partners/ast/", "concept_assertion_relation_training_data/beth/ast/", "test_data/ast/"]
AstFilePath = []
for file in FilePath:
    AstFilePath.append(os.path.join(relations_folder,file))

## template file path ##
template_file_path = args.templates_dir
## matching notes in temporal, coreference and relations dataset ##
matching_notes = os.path.join("generation/i2b2_relations/", "matching_notes.csv")
## output file paths ##
#qa_output = "/home/anusri/Desktop/emrQA/output/relations-qa.csv"
ql_output = os.path.join(args.output_dir,"relations-ql.csv")
relations_qa_output_json = os.path.join(args.output_dir,"relations-qa.json")
### write to csv file for viz ##
qa_csv_write = False
ql_csv_write = True
######################################################## CODE #########################################################################
class GenerateRelationsQuestions():
def __init__(self):
    """Load i2b2 relations, assertions and coreference data, then generate
    QA pairs from the relations templates (ReadTemplates writes the output)."""
    ## synsets to identify common nouns, will be used in preprocessing to remove generic i2b2 concepts ##
    self.similar = []
    val = [wn.synsets('problem'), wn.synsets('test'), wn.synsets('procedure'), wn.synsets('disease'),
           wn.synsets('medication'), wn.synsets('treatment'), wn.synsets('surgery')]
    # Counters for coreference-resolution bookkeeping (reported at the end).
    self.count_corefs = 0
    self.resolved_corefs = 0
    for out in val:
        for ss in out:
            self.similar.extend(ss.lemma_names())
    ## set paths (module-level config) ##
    self.RelationsFilePath = RelationsFilePath
    self.NoteFilePath = NoteFilePath
    self.AstFilePath = AstFilePath
    self.ReadRelationsData()
    self.ReadAssertionsData()
    self.ReadTemplates()
######################### Read i2b2 file functions ###################################
def ReadRelationsData(self):
    """Parse the i2b2 .rel files into self.RelationsPerNote.

    Populates:
      - self.ClinicalNotes: note id -> raw note lines
      - coreference cluster maps (via matching_notes.csv + ReadCoreference)
      - self.RelationsPerNote: note id -> [Relations dict, note lines, corefs]
    and calls self.MakeRelationMappings for every new (concept, concept) pair.
    """
    self.RelationsPerNote = {}
    self.ClinicalNotes = {}
    ## relations as seen in i2b2 relations challenge ###
    # NOTE(review): `type` shadows the builtin.
    type = {"TeRP": ("test", "problem"), "TeCP": ("test", "problem"), "TrIP": ("treatment", "problem"),
            "TrWP": ("treatment", "problem"),
            "TrCP": ("treatment", "problem"), "TrAP": ("treatment", "problem"), "TrNAP": ("treatment", "problem"),
            "PIP": ("problem1", "problem2")}
    self.tr_status = {"TrIP": "improves", "TrWP": "worsens/not improves", "TrAP": "not known status",
                      "TrCP": "causes"}
    ## read in all clinical notes ##
    for paths in self.NoteFilePath:
        files = [f for f in listdir(paths) if isfile(join(paths, f))]
        for file in files:
            remote_file = open(paths + file)
            Noteid = file.split(".")[0]
            self.ClinicalNotes[Noteid] = remote_file.readlines()
    ## read the file which shows the common notes in temporal, relations and coreference files from i2b2 challenge ##
    ## NOTE: This information is not available as a part of i2b2. This file is generated by using approximate methods (script provided).##
    match_file = open(matching_notes)
    csvreader = csv.reader(match_file)
    matching_files = list(csvreader) # relation, coreference
    Coreference_Note = {}
    self.CoreferenceCluster_to_Entity_map = {}
    self.Entity_to_CoreferenceCluster_map = {}
    ### Create coreference clusters for every type in every note and give each cluster an id. ###
    for file in matching_files[1:]:
        file = file[0].split("\t")
        relation_note_id = file[0].split("/")[-1].split(".")[0]
        coreference_path = file[1]
        coreferences = self.ReadCoreference(coreference_path, self.ClinicalNotes[relation_note_id])
        Coreference_Note[relation_note_id] = coreferences
        ## Create coreference clusters for every note ##
        self.CoreferenceCluster_to_Entity_map[relation_note_id] = {}
        self.Entity_to_CoreferenceCluster_map[relation_note_id] = {}
        for stype in coreferences:
            ## Create coreference clusters for every type (problem, test, treatment)##
            if stype not in self.CoreferenceCluster_to_Entity_map[relation_note_id]:
                self.CoreferenceCluster_to_Entity_map[relation_note_id][stype] = {}
                self.Entity_to_CoreferenceCluster_map[relation_note_id][stype] = {}
            cluster_id = 0
            for coref_list in coreferences[stype]:
                ## coref_list gets id given by cluster_id
                for concept in coref_list:
                    if cluster_id not in self.CoreferenceCluster_to_Entity_map[relation_note_id][stype]:
                        self.CoreferenceCluster_to_Entity_map[relation_note_id][stype][cluster_id] = []
                    self.CoreferenceCluster_to_Entity_map[relation_note_id][stype][cluster_id].append(concept) ## bug fixed ##
                    self.Entity_to_CoreferenceCluster_map[relation_note_id][stype][concept] = cluster_id
                cluster_id += 1
    #############################################################################################################################
    # Relation-specific lookup tables filled by MakeRelationMappings.
    self.map_problems_to_test_revealed = {}
    self.map_tests_to_problem_revealed = {}
    self.map_problems_to_test_investigated = {}
    self.map_tests_to_problem_investigated = {}
    self.map_treatments_to_problem = {}
    self.map_problems_to_treatment = {}
    self.problems_to_badtreatment = {}
    self.allergic_treatments = {}
    self.treatments_status_to_problem = {}
    self.map_problems_to_treatment = {}
    self.badtreatments_to_problem = {}
    self.symptoms_to_problem = {}
    self.problems_to_symptom = {}
    for paths in self.RelationsFilePath:
        files = [f for f in listdir(paths) if isfile(join(paths, f))]
        for file in files:
            remote_file = open(paths + file)
            Noteid = file.split(".")[0]
            PatientNote = self.ClinicalNotes[Noteid]
            try:
                Coreferences = Coreference_Note[Noteid]
            except:
                Coreferences = {}
            Relations = {}
            # Each .rel line: c="concept" line:tok line:tok||r="relation"||c="concept" ...
            for line in remote_file:
                line = line.replace("|||", "||")
                words = line.split("||")
                vals = []
                for word in [words[0], words[2]]:
                    term = word.split("=")
                    full_annotation = "=".join(term[1:])
                    # The annotated concept text sits between the first and
                    # last double quote; the span indices follow it.
                    index = [pos for pos, char in enumerate(full_annotation) if char == "\""]
                    pos1 = int(index[0])
                    pos2 = int(index[-1])
                    annotation = full_annotation[pos1 + 1:pos2]
                    indxs = full_annotation[pos2 + 1:].split(",")
                    line_in_note = ""
                    start_line = None
                    for indx in indxs:
                        indx = indx.strip()
                        out = indx.split(" ")
                        start_line = out[0].split(":")[0]
                        start_token = out[0].split(":")[1]
                        end_line = out[1].split(":")[0]
                        end_token = out[1].split(":")[1]
                        # note lines are 1-indexed in the annotations
                        line_in_note += "".join(PatientNote[int(start_line) - 1:int(end_line)])
                    vals.append((annotation, line_in_note, start_line, start_token))
                relate = words[1].split("=")[1].split("\"")[1]
                val1 = vals[0]
                val2 = vals[1]
                t1 = val1[0]
                t2 = val2[0]
                # print(relate)
                if relate not in Relations:
                    Relations[relate] = []
                ## preprocessing step done when generating question and logical forms, removed from here ##
                '''
                t1 = self.SimplePreProcess(val1[0])
                t2 = self.SimplePreProcess(val2[0])
                #print("yes")
                if t1 == None:
                    self.CheckForCoreferences(val1, type[relate][0],Coreferences)
                if t2 == None:
                    self.CheckForCoreferences(val2, type[relate][0], Coreferences)
                    continue
                if t1 == None or t2 == None:
                    ## Just use it because we dont want to miss the answers.
                    continue
                # If atelast one of the concept is a common noun ignore the relation
                ### Common Noun Check End###
                '''
                val1 = (t1, type[relate][0], val1[1], val1[2], val1[3])
                val2 = (t2, type[relate][1], val2[1], val2[2], val2[3])
                if (val1, val2) not in Relations[relate]:
                    Relations[relate].append((val1, val2))
                    self.MakeRelationMappings(val1, val2, relate, Noteid)
            self.RelationsPerNote[Noteid] = [Relations, PatientNote, Coreferences]
            '''
            # for cluster_id in self.map_problems_to_test_investigated:
            #     try:
            #         out = self.map_problems_to_test_revealed[cluster_id]
            #         print(self.map_problems_to_test_investigated[cluster_id])
            #         print(out)
            #         print("\n")
            #     except:
            #         continue
            print(Relations.keys())
            try:
                relation_investigated = Relations["TeCP"]
                relation_revealed = Relations["TeRP"]
            except:
                continue
            values = zip(*relation_revealed)
            for annotations in relation_investigated:
                try:
                    index_val = list(values[0]).index(annotations[0][0])
                except:
                    continue
                for idx in index_val:
                    print(annotations)
                    print(values[2][idx])
            '''
def ReadCoreference(self, coref_path, PatientNote):
    """Parse one i2b2 coreference .chains file into chains per concept type.

    :param coref_path: path to the note's "docs" file (rewritten to the
                       sibling "chains" annotation file)
    :param PatientNote: list of raw note lines (1-indexed in the annotations)
    :return: dict mapping concept type (e.g. "problem") -> list of chains,
             each chain a list of (annotation, line_in_note, start_line,
             start_token) tuples; "person" chains are skipped.
    """
    remote_file = open(coref_path.replace("docs", "chains") + ".chains")
    coref_concepts = {}
    for line in remote_file:
        line = line.replace("|||", "||")
        words = line.split("||")
        vals = []
        # Last field holds the chain type, e.g. t="coref problem" -> "problem".
        type = words[-1].replace("\"", "").split("=")[-1].strip().replace("coref ", "")
        if type not in coref_concepts and type != "person":
            coref_concepts[type] = []
        if type == "person":
            # patient/person chains are not needed for QA generation
            continue
        for word in words[0:-1]:
            term = word.split("=")
            full_annotation = "=".join(term[1:])
            # The annotated text sits between the first and last double quote;
            # the "line:token line:token" span indices follow it.
            index = [pos for pos, char in enumerate(full_annotation) if char == "\""]
            pos1 = int(index[0])
            pos2 = int(index[-1])
            annotation = full_annotation[pos1 + 1:pos2]
            indxs = full_annotation[pos2 + 1:].split(",")
            line_in_note = ""
            start_line = None
            for indx in indxs:
                indx = indx.strip()
                out = indx.split(" ")
                start_line = out[0].split(":")[0]
                start_token = out[0].split(":")[1]
                end_line = out[1].split(":")[0]
                # Fix: this assignment was duplicated in the original.
                end_token = out[1].split(":")[1]
                line_in_note += "".join(PatientNote[int(start_line) - 1:int(end_line)])
            vals.append((annotation, line_in_note, start_line, start_token))
        coref_concepts[type].append(vals)
    return coref_concepts
def ReadAssertionsData(self):
    """Parse the i2b2 .ast assertion files into self.problem_status.

    self.problem_status: note id -> {preprocessed problem annotation ->
    list of (assertion status, line_in_note, start_line, start_token)}.
    Only "problem" concepts are kept.
    """
    self.problem_status = {}
    for paths in self.AstFilePath:
        files = [f for f in listdir(paths) if isfile(join(paths, f))]
        for file in files:
            remote_file = open(paths + file)
            Noteid = file.split(".")[0]
            PatientNote = self.ClinicalNotes[Noteid]
            if Noteid not in self.problem_status:
                self.problem_status[Noteid] = {}
            # Each .ast line: c="concept" spans||t="type"||a="status"
            for line in remote_file:
                line = line.replace("|||", "||")
                words = line.split("||")
                vals = []
                type = words[1].split("=")[1].split("\"")[1]
                status = words[2].split("=")[1].split("\"")[1]
                for word in [words[0]]:
                    term = word.split("=")
                    full_annotation = "=".join(term[1:])
                    # Concept text sits between the first and last double quote.
                    index = [pos for pos, char in enumerate(full_annotation) if char == "\""]
                    pos1 = int(index[0])
                    pos2 = int(index[-1])
                    annotation = full_annotation[pos1 + 1:pos2]
                    indxs = full_annotation[pos2 + 1:].split(",")
                    line_in_note = ""
                    start_line = None
                    # SimplePreProcess may return None for generic concepts.
                    annotation = self.SimplePreProcess(annotation)
                    for indx in indxs:
                        indx = indx.strip()
                        out = indx.split(" ")
                        start_line = out[0].split(":")[0]
                        start_token = out[0].split(":")[1]
                        end_line = out[1].split(":")[0]
                        end_token = out[1].split(":")[1]
                        line_in_note += "".join(PatientNote[int(start_line) - 1:int(end_line)])
                    if annotation == None:
                        continue
                    if type == "problem":
                        if annotation not in self.problem_status[Noteid]:
                            self.problem_status[Noteid][annotation] = []
                        self.problem_status[Noteid][annotation].append((status,line_in_note,start_line,start_token))
######################## Main program functions ##########################################
def ReadTemplates(self):
    """Read the relations templates and generate QA pairs for every note.

    Writes question/logical-form rows to ql_output (CSV) and the full QA
    output to relations_qa_output_json.
    """
    self.relations_out = {"paragraphs": [], "title": "relations"}
    self.logical_out = []
    ########################################## Set File Paths ##############################################
    ### File to write Question-Answers ##
    if qa_csv_write:
        ofile = open(qa_output, "w")
        self.filewriter = csv.writer(ofile, delimiter="\t")
        self.filewriter.writerow(
            ["Question", "Logical Form", "Answer", "Answer line in note", "Note ID", "Difference in QA lines"])
    ### File to write Question-Logical Forms ##
    if ql_csv_write:
        ofile = open(ql_output, "w")
        self.filewriter_forlform = csv.writer(ofile, delimiter="\t")
        self.filewriter_forlform.writerow(["Question", "Logical Form"])
    ### File to read templates ###
    file = open(template_file_path)
    filereader = list(csv.reader(file))
    ## read only templates relevant to relations challenge ##
    rel_lines = []
    for line in filereader[1:]:
        if line[0] != "relations":
            continue
        rel_lines.append(line)
    ########################################## Main Function Call ##############################################
    total_questions = 0
    for Noteid in self.RelationsPerNote:
        [Relations, PatientNote, Coreferences] = self.RelationsPerNote[Noteid]
        out_patient = {"note_id": Noteid, "context": PatientNote, "qas": []}
        self.unique_questions = []
        for line in rel_lines:
            question = line[2].strip()
            logical_form = line[3].strip()
            helper = line[4].split(",")
            helper = [type.strip() for type in helper]
            answertype = line[5].strip()
            # Collapse repeated placeholders before substitution.
            question = question.replace("|medication| or |medication|", "|medication|") ## added ##
            question = question.replace("|problem| or |problem|", "|problem|") ## added ##
            question = question.replace("|test| or |test|", "|test|") ## added ##
            question = question.replace("|test| |test| |test|", "|test|") ## added ##
            # NOTE(review): the tab-stripping pair below is duplicated.
            question = question.replace("\t", "")
            logical_form = logical_form.replace("\t", "")
            question = question.replace("\t", "")
            logical_form = logical_form.replace("\t", "")
            if question.strip() == "":
                continue
            ## check for errors in templates and gather all the placeholders in the templates (placeholders stored in rwords) ##
            ## semantic types of placeholders ##
            types_to_replace = self.checking_for_errors(question, logical_form)
            if len(types_to_replace) != 0:
                types_to_replace = list(types_to_replace[0])
            else:
                types_to_replace = []
            answer_out = self.MakeLabTestQA(question, logical_form, types_to_replace, answertype, helper, Relations, Noteid, Coreferences)
            if len(answer_out) != 0:
                out_patient["qas"].extend(answer_out)
        total_questions += len(self.unique_questions)
        self.relations_out["paragraphs"].append(out_patient)
    print(total_questions)
    print(self.count_corefs)
    print(self.resolved_corefs)
    with open(relations_qa_output_json, 'w') as outfile:
        json.dump(self.relations_out, outfile, ensure_ascii=False)
def MakeLabTestQA(self, question, logical_form, types_to_replace, answertype, helper, Relations, Noteid, Coreferences):
"""Generate QA entries for one question template against one note's relations.

For each relation category in `helper`, instantiate the template's
|placeholder|s with annotated concepts, derive the answer via
AnswerSubFunction, and return a list of SQuAD-style answer dicts.
`helper == "ast"` delegates to the assertion-status path instead.
"""
orginal_question = question
logical_form_template = logical_form
answer_out = []
for relate in helper:
if relate == "ast":
questions_list = question.strip().split("##")
## fixed a bug, intially not included ##
# NOTE(review): plain assignment here overwrites any answers gathered for
# earlier `relate` values in this loop — confirm "ast" never co-occurs
# with relation categories in a template's helper list.
answer_out = self.HandleAssertionQA(Noteid, types_to_replace, questions_list, logical_form_template, Coreferences, answertype) ## fixed bug, intially was not including assertations data
else:
try:
relevant_relations = Relations[relate] ## Get relations which satisy the relate criteria
except:
# No annotated relation of this type in the note; skip the category.
continue
for val1, val2 in relevant_relations:
# val tuples: (text, semantic_type, line_text, start_line, start_token).
annotations = {val1[1]: (val1[0], val1[2], val1[3], val1[4]),
val2[1]: (val2[0], val2[2], val2[3], val2[4])}
## check if there are placeholders in the question, call function to replace the placeholders ##
if len(types_to_replace) != 0:
questions_list = question.strip().split("##")
out = self.MakeQuestion_new(types_to_replace, annotations, questions_list, logical_form_template, Coreferences, Noteid)
if out == None:
# Concept text unusable and no coreferent paraphrase found.
continue
else:
[question_list, logical_form, question_lines, question_start_line, question_start_token] = out
else:
## if no placeholders directly use the question ##
[question_list, logical_form, question_lines, question_start_line, question_start_token]= [question.split("##"), logical_form_template, "", "", ""]
### Writing question - logical form ##
paraphrase_questions = set(question_list)
question_templates = orginal_question.split("##")
if len(question_list) != len(question_templates):
print(question_list)
print(question_templates)
# Pair each instantiated paraphrase with the template it came from.
unique_tup = list(set(zip(question_list, question_templates)))
if ql_csv_write:
for qidx in range(len(unique_tup)):
self.filewriter_forlform.writerow([unique_tup[qidx][0]] + [logical_form] + [unique_tup[qidx][1]] + [logical_form_template])
##### Make answers for the succesful questions ####
[answer, answer_line, answer_start_line, answer_start_token] = self.AnswerSubFunction(answertype, val1, val2, Noteid, relate, question_lines, question_start_line, question_start_token)
if len(answer) != 0:
if paraphrase_questions not in self.unique_questions:
self.unique_questions.append(paraphrase_questions)
ans_list = []
for idx in range(len(answer)):
start_line = answer_start_line[idx]
start_token = answer_start_token[idx]
# NOTE(review): `answertype` is a string everywhere else; comparing
# against the list ["problems,status"] is always False, so this
# branch is dead — probably meant answertype == "problems,status".
if answertype == ["problems,status"]:
#entity_type = "complex"
entity_type = "empty"
elif answer[idx] == "":
entity_type = "empty"
else:
entity_type = "single"
#if answer[idx] == "" and start_token != "":
# print(paraphrase_questions)
val = {"answer_start": [start_line, start_token], "text": answer[idx], "evidence": answer_line[idx], "evidence_start": start_line, "answer_entity_type": entity_type}
if val not in ans_list:
ans_list.append(val)
## ""evidence"" in the dictionary above is currently just the answer line in the note. You can also consider question line and answer line from note as evidence in that uncomment below code and use it accordingly ##
'''
## evidence per answer ##
evidence_answer = []
evidence_start = []
evidence_temp_line = question_line + answer_line
evidence_temp_start = question_start_line + answer_start_line
for pdx in range(len(evidence_temp_line)):
if evidence_temp_line[pdx] not in evidence_answer:
evidence_answer.append(evidence_temp_line[pdx])
evidence_start.append(evidence_temp_start[pdx])
if answer[idx] == "yes" or answer[idx] == "no":
start_line = ""
start_token = ""
else:
start_line = answer_start_line[idx]
start_token = answer_start_token[idx]
val = {"answer_start": [start_line, start_token], "text": answer[idx],"evidence": evidence_answer,"evidence_start": evidence_start}
# evidence will have q_line_answer_line
if qa_csv_write:
result_num = answer_start_line + question_start_line
perms = list(
itertools.product(result_num, result_num)) ## find different pairs of numbers ##
diffs = [abs(val1 - val2) for (val1, val2) in perms]
difference = max(diffs)
Note_val = "#".join(list(set(evidence_temp_line)))
self.filewriter.writerow( ["##".join(paraphrase_questions)] + [logical_form] + [",".join(answer)] + [Note_val] + [Noteid + "_RelationsChallenge"] + [difference])
'''
# NOTE(review): on Python 3 this zip(...) is a lazy iterator object and is
# not JSON-serializable; this code is Python 2 only as written.
answer_temp = {"answers": ans_list,"id": [zip(question_list, question_templates), logical_form_template], "question": list(paraphrase_questions)}
answer_out.append(answer_temp)
return answer_out
def HandleAssertionQA(self,Noteid,dup_rwords, question_list_templates, logical_form_template,Coreferences, answertype):
"""Generate QA entries for assertion-status ("ast") templates.

For every problem in the note's assertion annotations, fill the template's
|placeholder|s with the problem text (or a coreferent paraphrase), build
the matching logical form, and emit the status evidence lines as answers.
Returns a list of answer dicts in the same shape as MakeLabTestQA.
"""
types_to_replace = list(dup_rwords)
answer_out = []
if len(dup_rwords) != 0:
for problem in self.problem_status[Noteid]:
answer = []
result_num = []
answer_line = []
result_token = []
# Reset to the raw template for each problem instantiation.
logical_form = logical_form_template
status = self.problem_status[Noteid][problem]
rwords = list(dup_rwords)
flag = 0
for idx in range(len(rwords)):
#print(problem)
# Clean the concept text; fall back to a random coreferent paraphrase.
(t1,valid_list) = self.CheckIfConceptValid((problem,status[0][1],status[0][2],status[0][3]),rwords[idx], Coreferences )
if t1 == None:
if valid_list != None:
replace_annoation = random.choice(valid_list)
rwords[idx] = replace_annoation
else:
# No usable surface form for this placeholder: skip the problem.
flag = 1
else:
rwords[idx] = t1
if flag == 1:
continue
new_question_list = []
### Make Question ###
for question in question_list_templates:
done = []
idx = 0
for types in list(types_to_replace):
index = question.find("|" + types + "|")
if index == -1 and types not in done:
# Template is missing a placeholder the type set promised.
print(question, "|" + types + "|", done)
question = question.replace("|" + types + "|", rwords[idx])
done.append(types)
idx += 1
#if question not in new_question_list:
new_question_list.append(question)
## ### Make Logical Form ###
idx = 0
done = []
for types in list(types_to_replace):
index = logical_form.find("|" + types + "|")
if index == -1 and types not in done:
print(logical_form, "|" + types + "|", done, types)
done.append(types)
logical_form = logical_form.replace("|" + types + "|", rwords[idx])
idx += 1
# Collect every (status_text, line, line_no, token_no) evidence tuple.
for val in status:
#print(val[0])
answer.append(val[0])
answer_line.append(val[1])
result_num.append(int(val[2]))
result_token.append(int(val[3]))
# NOTE(review): these writerow calls are not guarded by ql_csv_write,
# unlike the relations path above — confirm this is intentional.
if answertype == "none":
question_templates = question_list_templates
unique_tup = list(set(zip(new_question_list, question_templates)))
for qidx in range(len(unique_tup)):
self.filewriter_forlform.writerow([unique_tup[qidx][0]] + [logical_form] + [unique_tup[qidx][1]] + [logical_form_template])
else:
question_templates = question_list_templates
if len(new_question_list) != len(question_templates):
print(new_question_list)
print(question_templates)
unique_tup = list(set(zip(new_question_list, question_templates)))
for qidx in range(len(unique_tup)):
self.filewriter_forlform.writerow([unique_tup[qidx][0]] + [logical_form] + [unique_tup[qidx][1]] + [logical_form_template])
if len(answer) != 0:
'''
perms = list(itertools.product(result_num, result_num))
diffs = [abs(val1 - val2) for (val1, val2) in perms]
difference = max(diffs)
question_templates = question_list_templates
Note_val = "#".join(answer_line)
'''
new_question_list = set(new_question_list)
if new_question_list not in self.unique_questions:
'''
if qa_csv_write:
self.filewriter.writerow(["##".join(new_question_list)] + [logical_form] + [",".join(answer)] + [Note_val] + [Noteid + "_RelationsChallenge"] + [ difference])
'''
self.unique_questions.append(set(new_question_list))
ans_list = []
for idx in range(len(answer)):
#print(answer[idx], result_num[idx], result_token[idx])
#val = {"answer_start": [result_num[idx], result_token[idx]], "text": answer[idx], "evidence": answer_line[idx], "evidence_start": result_num[idx]}
# Answer text deliberately emptied: evidence line is the answer.
val = {"answer_start": [result_num[idx], ""], "text": "", "evidence": answer_line[idx], "evidence_start": result_num[idx], "answer_entity_type": "empty"}
if val not in ans_list:
ans_list.append(val)
# evidence will have q_line_answer_line
# NOTE(review): zip(...) here is a lazy iterator on Python 3 and is not
# JSON-serializable; this code is Python 2 only as written.
answer_temp = {"answers": ans_list, "id": [zip(question_templates,new_question_list),logical_form_template], "question": list(set(new_question_list))}
answer_out.append(answer_temp)
return answer_out
######################## Main Utility Functions ######################################
def MakeRelationMappings(self, val1, val2, relate, Noteid):
    """Index one annotated relation (val1 --relate--> val2) into the per-note maps.

    val1/val2 are concept tuples of the form
    (text, semantic_type, line_text, start_line, start_token).  Each endpoint
    is first resolved to its coreference cluster id (when one exists) so that
    paraphrases of the same entity share a single map entry; otherwise the raw
    concept text is the key.  Answers are not coreference-resolved here, so
    some answers may be redundant.
    """
    # Resolve each endpoint to a coreference cluster.  The semantic type may
    # carry a positional suffix ("problem1"/"problem2") stripped for lookup;
    # a missing note/type/concept key falls back to the raw concept text.
    try:
        concept_cluster_1 = self.Entity_to_CoreferenceCluster_map[Noteid][val1[1].replace("1", "")][
            (val1[0], val1[2], val1[3], val1[4])]
    except KeyError:
        concept_cluster_1 = val1[0]
    try:
        concept_cluster_2 = self.Entity_to_CoreferenceCluster_map[Noteid][val2[1].replace("2", "")][
            (val2[0], val2[2], val2[3], val2[4])]
    except KeyError:
        concept_cluster_2 = val2[0]
    # Lazily create every per-note table the first time this note is seen.
    if Noteid not in self.map_problems_to_test_revealed:
        self.map_problems_to_test_revealed[Noteid] = {}
        self.map_tests_to_problem_revealed[Noteid] = {}
        self.map_problems_to_test_investigated[Noteid] = {}
        self.map_tests_to_problem_investigated[Noteid] = {}
        self.allergic_treatments[Noteid] = []
        self.problems_to_badtreatment[Noteid] = {}
        self.treatments_status_to_problem[Noteid] = {}
        self.map_problems_to_treatment[Noteid] = {}
        self.badtreatments_to_problem[Noteid] = {}
        self.symptoms_to_problem[Noteid] = {}
        self.problems_to_symptom[Noteid] = {}
    if relate == "TeRP":
        ## Test revealed problem: index both directions. ##
        if concept_cluster_1 not in self.map_problems_to_test_revealed[Noteid]:
            self.map_problems_to_test_revealed[Noteid][concept_cluster_1] = []
        # BUG FIX: the original tested membership in the outer Noteid-keyed
        # dict (self.map_tests_to_problem_revealed) instead of the per-note
        # dict, so the list was reset to [] on every call and only the last
        # relation per concept survived.
        if concept_cluster_2 not in self.map_tests_to_problem_revealed[Noteid]:
            self.map_tests_to_problem_revealed[Noteid][concept_cluster_2] = []
        self.map_problems_to_test_revealed[Noteid][concept_cluster_1].append(val2)
        self.map_tests_to_problem_revealed[Noteid][concept_cluster_2].append(val1)
    if relate == "TeCP":
        ## Test conducted to investigate problem: index both directions. ##
        if concept_cluster_1 not in self.map_problems_to_test_investigated[Noteid]:
            self.map_problems_to_test_investigated[Noteid][concept_cluster_1] = []
        # BUG FIX: same per-note membership fix as the TeRP branch above.
        if concept_cluster_2 not in self.map_tests_to_problem_investigated[Noteid]:
            self.map_tests_to_problem_investigated[Noteid][concept_cluster_2] = []
        self.map_problems_to_test_investigated[Noteid][concept_cluster_1].append(val2)
        self.map_tests_to_problem_investigated[Noteid][concept_cluster_2].append(val1)
    if relate == "TrNAP" or relate == "TrCP":
        # Treatment not administered / treatment caused problem: record the
        # treatment as one the patient reacted badly to.
        if val1 not in self.allergic_treatments[Noteid]:
            self.allergic_treatments[Noteid].append(val1)
    if relate == "TrCP":
        if concept_cluster_1 not in self.problems_to_badtreatment[Noteid]:
            self.problems_to_badtreatment[Noteid][concept_cluster_1] = []
        if concept_cluster_2 not in self.badtreatments_to_problem[Noteid]:
            self.badtreatments_to_problem[Noteid][concept_cluster_2] = []
        self.problems_to_badtreatment[Noteid][concept_cluster_1].append(val2)
        self.badtreatments_to_problem[Noteid][concept_cluster_2].append(val1)
        if concept_cluster_1 not in self.map_problems_to_treatment[Noteid]:
            self.map_problems_to_treatment[Noteid][concept_cluster_1] = []
        status = self.tr_status[relate]
        self.map_problems_to_treatment[Noteid][concept_cluster_1].append((val2, status))
    if relate == "TrIP" or relate == "TrWP" or relate == "TrAP":
        # Treatment improves / worsens / is administered for a problem; the
        # status string comes from the tr_status lookup table.
        if concept_cluster_2 not in self.treatments_status_to_problem[Noteid]:
            self.treatments_status_to_problem[Noteid][concept_cluster_2] = []
        status = self.tr_status[relate]
        self.treatments_status_to_problem[Noteid][concept_cluster_2].append(
            (val1, status))  ## val1 is treatment
        if concept_cluster_1 not in self.map_problems_to_treatment[Noteid]:
            self.map_problems_to_treatment[Noteid][concept_cluster_1] = []
        status = self.tr_status[relate]
        self.map_problems_to_treatment[Noteid][concept_cluster_1].append((val2, status))
    if relate == "PIP":
        # Problem indicates problem (symptom relation): index both directions.
        if concept_cluster_1 not in self.symptoms_to_problem[Noteid]:
            self.symptoms_to_problem[Noteid][concept_cluster_1] = []
        if concept_cluster_2 not in self.problems_to_symptom[Noteid]:
            self.problems_to_symptom[Noteid][concept_cluster_2] = []
        self.symptoms_to_problem[Noteid][concept_cluster_1].append(val2)
        self.problems_to_symptom[Noteid][concept_cluster_2].append(val1)
def AnswerSubFunction(self, answertype, val1, val2, Noteid, relate, question_lines, question_start_line, question_start_token):
"""Derive the answer spans for one instantiated question.

Dispatches on `answertype` into the per-note relation maps built by
MakeRelationMappings and returns four parallel lists:
[answer_texts, answer_lines, answer_start_lines, answer_start_tokens].
Yes/no style answer types return empty answer strings whose evidence is
the question line itself.
"""
# Resolve both endpoints to coreference clusters (same fallback behavior
# as MakeRelationMappings); bare except keeps the raw text on any failure.
try:
concept_cluster_1 = self.Entity_to_CoreferenceCluster_map[Noteid][val1[1].replace("1", "")][
(val1[0], val1[2], val1[3], val1[4])]
except:
concept_cluster_1 = val1[0]
try:
concept_cluster_2 = self.Entity_to_CoreferenceCluster_map[Noteid][val2[1].replace("2", "")][
(val2[0], val2[2], val2[3], val2[4])]
except:
concept_cluster_2 = val2[0]
answer = []
result_start_line = []
result_start_token = []
answer_line = []
######################## rules for test answers ########################
if answertype == "yes/no" or answertype == "abnormal" or answertype == "yes":
#answer = ["yes"]* len(question_lines)
answer = [""] * len(question_lines)
answer_line.extend(question_lines)
result_start_line.extend(question_start_line)
#result_start_token.extend(question_start_token)
result_start_token = [""] * len(question_lines)
elif answertype == "tests_investigated":
tests = self.map_tests_to_problem_investigated[Noteid][concept_cluster_2]
for test in tests:
answer += [test[0]]
answer_line.append(test[2])
result_start_line.append(int(test[3]))
result_start_token.append(int(test[4]))
elif answertype == "tests_revealed":
tests = self.map_tests_to_problem_revealed[Noteid][concept_cluster_2]
for test in tests:
answer += [test[0]]
answer_line.append(test[2])
result_start_line.append(int(test[3]))
result_start_token.append(int(test[4]))
elif answertype == "conducted_problem_revealed_problem":
# NOTE(review): lookup appears to be missing the [Noteid] subscript
# (compare "tests_investigated" above); the bare except silently turns
# this branch into a no-op — confirm and fix.
try:
investigated_problems = self.map_problems_to_test_investigated[concept_cluster_1]
for problem in investigated_problems:
answer += [problem[0]]
# answer += ["conducted " + problem[0]]
answer_line.append(problem[2])
result_start_line.append(int(problem[3]))
result_start_token.append(int(problem[4]))
except:
pass
# NOTE(review): same suspected missing [Noteid] subscript here.
try:
revealed_problems = self.map_problems_to_test_revealed[concept_cluster_1]
for problem in revealed_problems:
# answer += ["revealed " + problem[0]]
answer += [problem[0]]
answer_line.append(problem[2])
result_start_line.append(int(problem[3]))
result_start_token.append(int(problem[4]))
except:
pass
elif answertype == "revealed_problem":
# NOTE(review): same suspected missing [Noteid] subscript; the except
# branch then always emits the empty-answer fallback — confirm.
try:
revealed_problems = self.map_problems_to_test_revealed[concept_cluster_1]
for problem in revealed_problems:
answer += [problem[0]]
answer_line.append(problem[2])
result_start_line.append(int(problem[3]))
result_start_token.append(int(problem[4]))
except:
#answer = ["no"]*len(question_lines)
answer = [""] * len(question_lines)
answer_line.extend(question_lines)
result_start_line.extend(question_start_line)
#result_start_token.extend(question_start_token)
result_start_token = [""] * len(question_lines)
elif answertype == "problems_investigated":
problems = self.map_problems_to_test_investigated[Noteid][concept_cluster_1]
# print(problems)
for problem in problems:
answer += [problem[0]]
answer_line.append(problem[2])
result_start_line.append(int(problem[3]))
result_start_token.append(int(problem[4]))
##########################################################################################################################################
elif answertype == "allergic_treatments":
events = self.allergic_treatments[Noteid]
for event in events:
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
elif answertype == "treatments, status":
events = self.treatments_status_to_problem[Noteid][concept_cluster_2]
for temp in events:
(event, status) = temp
'''
stemp = ""
status = status.strip()
if val2[0] in self.problem_status[Noteid]:
out = self.problem_status[Noteid][val2[0]]
if out[1] == question_line and out[2] == line_num:
stemp = out[0]
status += ", "+stemp
'''
# answer += [event[0] + " (" + status + ")"]
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
elif answertype == "problems,status":
try:
events = self.map_problems_to_treatment[Noteid][concept_cluster_1]
# print(events)
# NOTE(review): zip(*events)[1] is Python 2 only; on Python 3 a zip
# object is not subscriptable.
if "causes" in zip(*events)[1] and "improves" in zip(*events)[1]:
print(Noteid)
for temp in events:
(event, status) = temp
#answer += [event[0] + " (" + status + ")"]
#answer.append([event[0], status])
# Answer text intentionally emptied; evidence line carries the span.
answer.append("")
# answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
except:
caused_problems = self.problems_to_badtreatment[Noteid][concept_cluster_1]
for event in caused_problems:
#answer += [event[0] + " (" + "caused" + ")"]
#answer.append([event[0] , "caused"])
# answer += [event[0]]
answer.append("")
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
elif answertype == "no":
#answer = ["no"]*len(question_lines)
answer = [""] * len(question_lines)
answer_line.extend(question_lines)
result_start_line.extend(question_start_line)
#result_start_token.extend(question_start_token)
result_start_token = [""] * len(question_lines)
elif answertype == "problems_check_conducted":
events = self.map_problems_to_treatment[Noteid][concept_cluster_1]
for temp in events:
(event, status) = temp
# answer += ["treatment:" + event[0]]
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
# Also add tests whose coreference cluster overlaps this treatment's
# cluster, if any.  Bare except deliberately makes this best-effort.
try:
treatment_entities_list = self.CoreferenceCluster_to_Entity_map["treatment"][concept_cluster_1]
tests = self.map_problems_to_test_investigated[Noteid]
for test in tests:
test_entities_list = self.CoreferenceCluster_to_Entity_map["test"][test]
new_set = set(test_entities_list).intersection(set(treatment_entities_list))
if len(new_set) != 0:
events = self.map_problems_to_test_investigated[Noteid][test]
for temp in events:
(event, status) = temp
# answer += ["tests:" + event[0]]
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
break
except:
pass
elif answertype == "problems":
if relate == "TrCP":
pass
# events = self.problems_to_badtreatment[Noteid][concept_cluster_1]
# for event in events:
# answer += [event[0]]
# answer_line.append(event[2])
# result_start_line.append(int(event[3]))
# result_start_token.append(int(event[4]))
else:
events = self.map_problems_to_treatment[Noteid][concept_cluster_1]
for temp in events:
(event, status) = temp
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
elif answertype == "treatments":
events = self.treatments_status_to_problem[Noteid][concept_cluster_2]
for temp in events:
(event, status) = temp
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
elif answertype == "problem1, treatment":
try:
events = self.badtreatments_to_problem[Noteid][concept_cluster_2]
for event in events:
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
except:
pass
'''
try:
events = self.problems_to_symptom[Noteid][concept_cluster_2]
for event in events:
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
except:
print(relate,answertype)
pass
'''
elif answertype == "problem1":
events = self.problems_to_symptom[Noteid][concept_cluster_2]
for event in events:
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
elif answertype == "symptoms":
events = self.symptoms_to_problem[Noteid][concept_cluster_1]
for event in events:
answer += [event[0]]
answer_line.append(event[2])
result_start_line.append(int(event[3]))
result_start_token.append(int(event[4]))
elif answertype == "none":
answer = []
else:
# Unknown answer type: log it and return no answers.
print(answertype)
answer = []
return [answer, answer_line, result_start_line, result_start_token]
def MakeQuestion_new(self, types_to_replace, annotations, question_list, logical_form_template, Coreferences, Noteid):
"""Fill the |placeholder|s of every question paraphrase and the logical form.

`annotations` maps semantic type -> (text, line, start_line, start_token).
Each placeholder's concept text is cleaned via CheckIfConceptValid; when
the raw text is unusable, a random coreferent paraphrase is substituted.
Returns [questions, logical_form, q_lines, q_start_lines, q_start_tokens],
or None when some placeholder has no usable surface form.
"""
new_question_list = []
question_start_line = []
question_start_token = []
question_line = []
rwords = list(types_to_replace)
for idx in range(len(rwords)):
# Record where in the note each placeholder's concept was annotated.
question_start_line.append(int(annotations[rwords[idx]][2]))
question_start_token.append(int(annotations[rwords[idx]][3]))
question_line.append(annotations[rwords[idx]][1])
(t1, valid_list) = self.CheckIfConceptValid(annotations[rwords[idx]], rwords[idx], Coreferences)
if t1 == None:
if valid_list != None:
replace_annoation = random.choice(valid_list) ### all of them can be used for QL forms (more training data)
# print(annotations[rwords[idx]])
rwords[idx] = replace_annoation
else:
# Neither the raw mention nor any coreferent paraphrase is usable.
return None
else:
rwords[idx] = t1
for question in question_list:
done = []
idx = 0
for types in list(types_to_replace):
# temp = qwords
index = question.find("|" + types + "|")
if index == -1 and types not in done:
# Template is missing a placeholder that the type set promised.
print(question, "|" + types + "|", done)
question = question.replace("|" + types + "|", rwords[idx])
done.append(types)
idx += 1
new_question_list.append(question)
idx = 0
done = []
# Same substitution pass for the logical form template.
for types in list(types_to_replace):
index = logical_form_template.find("|" + types + "|")
if index == -1 and types not in done:
print(logical_form_template, "|" + types + "|", done, types)
done.append(types)
logical_form_template = logical_form_template.replace("|" + types + "|", rwords[idx])
idx += 1
return [new_question_list, logical_form_template, question_line, question_start_line, question_start_token]
######################## Supporting Utility Functions ######################################
#the tremendous tumor burden,the cord compression,gait weakness , stress incontinence copd flare a wide based gait shuffling short steps head computerized tomography scan
def SimplePreProcess(self, word):
    """Normalize a concept phrase so it can be embedded in a generated question.

    Drops a leading determiner, "patient" mentions, pronouns (replaced by
    "the" mid-phrase), prepositions/wh-determiners, and words in
    self.similar.  Returns the cleaned phrase, or None when the phrase is
    empty, a common noun / past-tense form, reduces to nothing, or keeps no
    content-bearing POS tag.
    """
    if word == "":
        return None
    lemmatizer = WordNetLemmatizer()
    if concept_is_CommonNoun(word) == 1 or concept_is_PastTense(word) == 1:
        return None
    tag = nltk.pos_tag(nltk.word_tokenize(word))
    # list() keeps this working on Python 3, where zip() yields a
    # non-subscriptable iterator (original indexed zip(*tag)[0] directly).
    temp = list(zip(*tag))
    words = list(temp[0])
    tags = list(temp[1])
    if tags[0] == "DT":
        # Strip a leading determiner: "the tumor" -> "tumor".
        words[0] = ""
    for idx in range(len(tags)):
        if lemmatizer.lemmatize(words[idx].lower()) in ["patient"]:
            words[idx] = ""
        if tags[idx] in ["PRP", "PRP$"]:
            # Pronouns become "the" mid-phrase; a leading pronoun is dropped.
            if idx != 0 or " ".join(words[0:idx]).strip() != "":
                words[idx] = "the"
            if idx == 0:
                words[idx] = ""
        # BUG FIX: the original compared a string against a list
        # (tags[idx] == ["IN", "WDT"]), which is always False and made this
        # branch dead; the intent is to drop non-leading prepositions and
        # wh-determiners.
        if " ".join(words[0:idx]).strip() != "" and tags[idx] in ("IN", "WDT"):
            words[idx] = ""
    words = [w for w in words if w != "" and lemmatizer.lemmatize(w) not in self.similar]  ## check if its okay to start with "further"
    if len(words) == 0:
        return None
    cleaned = " ".join(words)  ## To make sure it makes sense you can use a parse #
    # Re-tag the cleaned phrase and require at least one content-bearing tag.
    # NOTE(review): "jjR" is preserved from the original; it is probably a
    # typo for "JJR" (comparative adjective) — confirm before changing.
    retag = nltk.pos_tag(nltk.word_tokenize(cleaned))
    final_tags = [t for (_, t) in retag]
    content_tags = {"NN", "NNS", "jjR", "JJS", "JJ", "NNP", "NNPS", "VB", "VBG", "VBP", "VBZ"}
    if len(content_tags.intersection(set(final_tags))) == 0:
        return None
    return cleaned
def CheckForCoreferences(self, concept, type, Coreferences):
    """Collect cleaned paraphrases for *concept* from its coreference chain.

    `concept` is a concept tuple whose first element is the surface text;
    `Coreferences` maps a semantic type to a list of chains (lists of
    concept tuples).  Every mention in a chain containing *concept* is run
    through SimplePreProcess.  Returns the de-duplicated list of usable
    paraphrases, or None when the type has no chains or none are usable.
    Also maintains the count_corefs / resolved_corefs statistics.
    """
    self.count_corefs += 1
    valid_list = []
    # Positional suffixes share a single coreference table.
    if type == "problem1" or type == "problem2":
        type = "problem"
    try:
        coref_lists = Coreferences[type]
    except KeyError:
        return None
    for coref_list in coref_lists:
        if concept not in coref_list:
            continue
        # First tuple element is the mention text.  (The original indexed
        # zip(*coref_list)[0] repeatedly, which is Python 2 only.)
        for entry in coref_list:
            sout = self.SimplePreProcess(entry[0])
            if sout != None and sout not in valid_list:
                valid_list.append(sout)
    if len(valid_list) != 0:
        self.resolved_corefs += 1
        return valid_list
    else:
        return None
def CheckIfConceptValid(self, val, type, Coreferences):
    """Clean a concept's surface text, falling back to its coreference chain.

    Returns a pair (cleaned_text_or_None, paraphrase_list_or_None).  The
    paraphrase list is only looked up when the raw mention could not be
    cleaned; a usable mention is returned with no paraphrase list.
    """
    cleaned = self.SimplePreProcess(val[0])
    # Coreference resolution is currently a fallback only; it could also be
    # used to rewrite valid concepts.
    fallback = None if cleaned is not None else self.CheckForCoreferences(val, type, Coreferences)
    return (cleaned, fallback)
# If atelast one of the concept is a common noun ignore the relation
### Common Noun Check End###
def checking_for_errors(self, question_list, logical_form_template):
    """Validate that all question paraphrases and the logical form agree on
    their |placeholder| types.

    `question_list` is a "##"-joined string of paraphrase templates.
    Returns a one-element list containing the shared placeholder set on
    success, or [] (after printing a diagnostic) on any mismatch.
    """
    templates = question_list.split("##")
    seen_templates = []
    placeholder_sets = []
    for template in templates:
        if template.strip() == "":
            continue
        # Validate each distinct paraphrase only once.
        if template in seen_templates:
            continue
        seen_templates.append(template)
        # Odd-indexed pieces of a "|" split are the placeholder names.
        pieces = template.split("|")
        placeholders = set(pieces[1:len(pieces):2])
        if len(placeholder_sets) == 0:
            placeholder_sets = [placeholders]
        elif placeholders not in placeholder_sets:
            print("Error Out Of Context Question:")
            print(template, logical_form_template, templates)
            return []
    lf_pieces = logical_form_template.split("|")
    lf_placeholders = set(lf_pieces[1:len(lf_pieces):2])
    if lf_placeholders not in placeholder_sets:
        print("Error Out Of Context Question-Logical Form Pairs:")
        print(templates, logical_form_template)
        return []
    if len(placeholder_sets) != 1:
        print("Check Question_Logical Form Mapping")
        print(placeholder_sets, templates)
        print(logical_form_template)
        return []
    return placeholder_sets
# Entry point: build the relations-challenge QA dataset when run as a script.
if __name__=="__main__":
GenerateRelationsQuestions()
| 56,886
| 42.227204
| 247
|
py
|
emrQA
|
emrQA-master/generation/i2b2_heart_disease_risk/risk-answers.py
|
from os import listdir
import xmltodict
import csv
import sys
import json
import random
import argparse
import os
# Python 2 only: force the default string codec so clinical notes containing
# ISO-8859-1 characters decode implicitly.  NOTE(review): reload() and
# sys.setdefaultencoding() do not exist on Python 3.
reload(sys)
sys.setdefaultencoding("ISO-8859-1")
# Command-line configuration: i2b2 data location, template directory, output.
parser = argparse.ArgumentParser()
parser.add_argument('--i2b2_dir', default='', help='Directory containing i2b2 heart disease risk challange files')
parser.add_argument('--templates_dir', default='', help='Directory containing template files in the given format')
parser.add_argument('--output_dir', default='', help='Directory to store the output')
args = parser.parse_args()
###################################################### SET FILE PATHS ##################################################################
## i2b2 file paths ##
RiskFilePath = [os.path.join(args.i2b2_dir,"training-RiskFactors-Complete-Set1/")]
## template file path ##
template_file_path = args.templates_dir
## output file paths ##
qa_output = os.path.join(args.output_dir,"risk-qa.csv")
ql_output = os.path.join(args.output_dir,"risk-ql.csv")
risk_qa_output_json = os.path.join(args.output_dir,"risk-qa.json")
######################################################## CODE #########################################################################
################################################# STANDARD VALUES FROM THE i2b2 heart disease risk paper paper #####################################################################
# Clinical threshold values per risk-factor test, as given by the i2b2 heart
# disease risk challenge paper.
test_value = {"A1C":"6.5", "glucose": "126", "Cholestrol":"240", "LDL":"100 mg/dl", "blood pressure": "140/90 mm/hg", "BMI": "30"}
# Maps an indicator label to (disease tag, test name).
dictionary = {}
dictionary = {"high chol.": ("HYPERLIPIDEMIA","Cholestrol"), "A1C": ("DIABETES","A1C"), "high bp": ("HYPERTENSION","blood pressure"),
"BMI": ("OBESITY","BMI"), "glucose":("DIABETES","glucose"), "high LDL":("HYPERLIPIDEMIA","LDL")}
# Reverse view: disease tag -> list of indicator labels.
# NOTE(review): key casing differs from `dictionary` values ("Diabetes" vs
# "DIABETES", "OBESE" vs "OBESITY") — verify lookups against these keys.
disease_test = {}
disease_test["HYPERLIPIDEMIA"] = ["high chol.","high LDL"]
disease_test["Diabetes"] = ["A1C","glucose"]
disease_test["HYPERTENSION"] = ["high bp"]
disease_test["OBESE"] = ["BMI"]
disease_test["CAD"] = []
def num_there(s):
    """Return True when the string *s* contains at least one digit character."""
    for ch in s:
        if ch.isdigit():
            return True
    return False
# Flatten the risk-factor dictionary into parallel annotation lists: one
# singleton list per test name, paired with the matching disease label.
test_annotations = [[test_name] for (_, test_name) in dictionary.values()]
problem_annotations = [disease for (disease, _) in dictionary.values()]
class RiskFileAnalysis():
def __init__(self):
"""Parse every risk-factor XML note, then generate QA pairs from templates."""
# Accumulators populated while reading the i2b2 files.
self.list_medications = []
self.types = []
self.ReadFile()
# NOTE(review): ReadTemplates is defined elsewhere in this class.
self.ReadTemplates()
#self.WriteTimeData()
################################ Read the Risk Files #######################################################
def ReadFile(self):
"""Parse each i2b2 risk-factor XML note and collect per-note annotations.

Each file named "<patient_id>-<record>.xml" is copied line-by-line to a
temp file (skipping unwritable lines), parsed with xmltodict, and fed to
the per-disease readers.  Results accumulate in self.RiskAnnotationsPerNote
as [clinical_notes, record_dates, annotations] per patient.
"""
file_path = RiskFilePath
TempFile = "temp_risk.txt"
self.Patients = {}
self.RiskAnnotationsPerNote = {}
for paths in file_path:
files = listdir(paths)
files.sort()
for file in files:
[patient_id,record] = file.split("-")
id = record.split(".")[0]
# NOTE(review): self.Patients is reset inside the per-file loop, so it
# only ever holds the current file's entries — confirm intended.
self.Patients = {}
if patient_id not in self.Patients:
self.Patients[patient_id] = []
# NOTE(review): buffering=0 on a text-mode file is Python 2 only.
ofile = open(TempFile, "w", 0)
remote_file = open(paths + file)
# Copy line-by-line so a single undecodable line does not abort the file.
for line in remote_file:
try:
ofile.write(line)
except:
print("error writing file")
ofile.close()
with open(TempFile) as fd:
self.doc = xmltodict.parse(fd.read())
# Populate self.Patients[patient_id] with per-disease annotations.
self.ReadDiabetes(patient_id)
self.ReadCAD(patient_id)
self.ReadHyperlipedimia(patient_id)
self.ReadHYPERTENSION(patient_id)
self.ReadObesity(patient_id)
if patient_id not in self.RiskAnnotationsPerNote:
self.RiskAnnotationsPerNote[patient_id] = [[],[],[]] ## clinical note, record date, annotations_note
out = {}
# NOTE(review): tuple[2].keys()[0] is Python 2 only (py3 dict views are
# not indexable); `tuple` also shadows the builtin, and the two appends
# below sit outside the loop so they use the last tuple only — confirm.
for tuple in self.Patients[patient_id]:
out[tuple[2].keys()[0]] = tuple[2][tuple[2].keys()[0]]
self.RiskAnnotationsPerNote[patient_id][2].append(out)
self.RiskAnnotationsPerNote[patient_id][0].append(tuple[0])
self.RiskAnnotationsPerNote[patient_id][1].append(tuple[1])
    def ReadHYPERTENSION(self, patient_id):
        """Parse HYPERTENSION annotations for the current note.

        Reads the note text and gold tags from ``self.doc`` (an XML
        document parsed into nested dicts/lists; ``@``-prefixed keys are
        attributes -- presumably produced by xmltodict, TODO confirm),
        builds a per-line character-offset index, extracts the record
        date from the first DATE PHI tag, collects "mention" and
        "high bp" evidence spans into ``Dictionary``, and delegates to
        ``self.ReadMedication``, which stores the result for
        ``patient_id``.
        """
        disease = "HYPERTENSION"
        Medications = ['beta blocker', 'calcium channel blocker', 'thiazolidinedione', 'ARB']
        ## Read Note
        Clinical_Notes = self.doc['root']["TEXT"]
        sentences = Clinical_Notes.split("\n") ## changed delimiter from full stop to newline
        CharPos = 0
        indices = []
        # Line index: (start_char_offset, end_char_offset, line_text), used to
        # map annotation character offsets back to whole note lines.
        for line in sentences:
            indices.append((CharPos, CharPos + len(line), line))
            CharPos = CharPos + 1 + len(line) ### +1 to account for the "\n"
        start = ""
        end = ""
        try:
            # NOTE(review): this 3-tuple fallback differs in arity from the
            # 4-tuple assigned below when a DATE tag is found.
            Record_Date = ("","","")
            # PHI is normally a list of tags; take the first DATE one.
            for idx in range(len(self.doc['root']["TAGS"]["PHI"])):
                TYPE = self.doc['root']["TAGS"]["PHI"][idx]["@TYPE"]
                if TYPE == "DATE":
                    ## 1st DATE tag is taken as the record date
                    start = self.doc['root']["TAGS"]["PHI"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["PHI"][idx]["@end"]
                    text = self.doc['root']["TAGS"]["PHI"][idx]["@text"]
                    break
                else:
                    continue
        except:
            # NOTE(review): bare except -- presumably reached when PHI is a
            # single dict (one tag) rather than a list; read it directly.
            print(self.doc['root']["TAGS"]["PHI"])
            text = self.doc['root']["TAGS"]["PHI"]["@text"]
            start = self.doc['root']["TAGS"]["PHI"]["@start"]
            end = self.doc['root']["TAGS"]["PHI"]["@end"]
        if start != "" :
            start = int(start)
            end = int(end)
            flag_start = 0
            # Map the [start, end] character span onto whole lines:
            # 1st branch: span fits in one line; 2nd: span starts here but
            # ends later; 3rd: span ends here; trailing if: accumulate the
            # lines in between.
            for tup_id in range(len(indices)):
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                        indices[tup_id][1]:
                    #start_evidence = tup_id
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    break
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    continue
                if end <= indices[tup_id][1] and flag_start == 1:
                    end_evidence = indices[tup_id][1]
                    inline_text += "\n" + indices[tup_id][2]
                    break
                if flag_start == 1:
                    inline_text += "\n" + indices[tup_id][2]
            #print(inline_text)
            start_inline_text = start_evidence
            Record_Date = (text, inline_text, start_inline_text, start)
        #print(Record_Date)
        ### Create Events ##
        Dictionary = {}
        Dictionary[disease] = {}
        Dictionary[disease]["mention"] = {}
        Dictionary[disease]["high bp"] = {}
        # print(sentences)
        # print(Record_Date)
        try:
            NumIndoc = len(self.doc['root']["TAGS"][disease])
        except:
            # No HYPERTENSION tags in this note: record medications only.
            # print(Record_Date)
            # self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
            self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, disease)
            return
        for docid in range(NumIndoc):
            # The XML-to-dict conversion yields three shapes for the tags;
            # b records which applies: 0 = list of documents each holding a
            # tag list, 1 = single document dict, 3 = the tags themselves.
            try:
                count = len(self.doc['root']["TAGS"][disease][docid][disease])
                b = 0
            except:
                try:
                    count = len(self.doc['root']["TAGS"][disease][disease])
                    b = 1
                except:
                    count = len(self.doc['root']["TAGS"][disease])
                    b = 3
            for idx in range(count):
                if b == 0:
                    indicator = self.doc['root']["TAGS"][disease][docid][disease][idx]["@indicator"]
                    text = self.doc['root']["TAGS"][disease][docid][disease][idx]["@text"]
                    time = self.doc['root']["TAGS"][disease][docid][disease][idx]["@time"]
                    start = self.doc['root']["TAGS"][disease][docid][disease][idx]["@start"]
                    end = self.doc['root']["TAGS"][disease][docid][disease][idx]["@end"]
                    # NOTE(review): `id` shadows the builtin and is unused.
                    id = self.doc['root']["TAGS"][disease][docid][disease][idx]["@id"]
                elif b == 1:
                    indicator = self.doc['root']["TAGS"][disease][disease][idx]["@indicator"]
                    text = self.doc['root']["TAGS"][disease][disease][idx]["@text"]
                    time = self.doc['root']["TAGS"][disease][disease][idx]["@time"]
                    start = self.doc['root']["TAGS"][disease][disease][idx]["@start"]
                    end = self.doc['root']["TAGS"][disease][disease][idx]["@end"]
                    id = self.doc['root']["TAGS"][disease][disease][idx]["@id"]
                else:
                    indicator = self.doc['root']["TAGS"][disease][idx]["@indicator"]
                    try:
                        text = self.doc['root']["TAGS"][disease][idx]["@text"]
                        # print(self.doc['root']["TAGS"][disease][idx])
                        # NOTE(review): unlike @text above, these lookups use
                        # the docid-indexed path, which for the b == 3 shape
                        # typically raises, skipping the tag -- confirm intended.
                        time = self.doc['root']["TAGS"][disease][docid][disease][idx]["@time"]
                        start = self.doc['root']["TAGS"][disease][docid][disease][idx]["@start"]
                        end = self.doc['root']["TAGS"][disease][docid][disease][idx]["@end"]
                        id = self.doc['root']["TAGS"][disease][docid][disease][idx]["@id"]
                    except:
                        print("failed")
                        # self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
                        continue
                if indicator == "mention":
                    # rint("mention",text,time)
                    # start = int(start) - 3
                    # end = int(end) - 3
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    # Same line-span mapping as for the record date above.
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    # Evidence key: (tag text, covering line(s), line start
                    # offset, tag start offset); value: distinct @time labels.
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["mention"]:
                        Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)].append(time)
                elif indicator == "high bp":
                    # print("A1C",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["high bp"]:
                        Dictionary[disease]["high bp"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["high bp"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["high bp"][(text, inline_text, start_inline_text, start)].append(time)
                else:
                    print(indicator)
                    continue
        self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, disease)
    def ReadCAD(self, patient_id):
        """Parse CAD (coronary artery disease) annotations for the note.

        Same pipeline as the sibling ``Read*`` methods: build a per-line
        offset index over ``self.doc['root']['TEXT']``, extract the
        record date from the first DATE PHI tag, collect "mention",
        "event", "test" and "symptom" evidence spans into ``Dictionary``,
        then delegate to ``self.ReadMedication`` for ``patient_id``.
        """
        disease = "CAD"
        Medications = [u'ACE inhibitor', u'thienopyridine', u'beta blocker', u'aspirin', u'calcium channel blocker', u'nitrate' ]
        ## Read Note
        Clinical_Notes = self.doc['root']["TEXT"]
        sentences = Clinical_Notes.split("\n") ## changed delimiter from full stop to newline
        CharPos = 0
        indices = []
        # Line index: (start_char_offset, end_char_offset, line_text).
        for line in sentences:
            indices.append((CharPos, CharPos + len(line), line))
            CharPos = CharPos + 1 + len(line) ### +1 to account for the "\n"
        start = ""
        end = ""
        try:
            Record_Date = ("","","")
            # PHI is normally a list of tags; take the first DATE one.
            for idx in range(len(self.doc['root']["TAGS"]["PHI"])):
                TYPE = self.doc['root']["TAGS"]["PHI"][idx]["@TYPE"]
                if TYPE == "DATE":
                    ## 1st DATE tag is taken as the record date
                    start = self.doc['root']["TAGS"]["PHI"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["PHI"][idx]["@end"]
                    text = self.doc['root']["TAGS"]["PHI"][idx]["@text"]
                    break
                else:
                    continue
        except:
            # NOTE(review): bare except -- presumably PHI was a single dict
            # (one tag) rather than a list; read it directly.
            print(self.doc['root']["TAGS"]["PHI"])
            text = self.doc['root']["TAGS"]["PHI"]["@text"]
            start = self.doc['root']["TAGS"]["PHI"]["@start"]
            end = self.doc['root']["TAGS"]["PHI"]["@end"]
        if start != "" :
            start = int(start)
            end = int(end)
            flag_start = 0
            # Map the [start, end] character span onto whole note lines
            # (single-line hit / span start / span end / middle lines).
            for tup_id in range(len(indices)):
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                        indices[tup_id][1]:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    break
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    continue
                if end <= indices[tup_id][1] and flag_start == 1:
                    end_evidence = indices[tup_id][1]
                    inline_text += "\n" + indices[tup_id][2]
                    break
                if flag_start == 1:
                    inline_text += "\n" + indices[tup_id][2]
            start_inline_text = start_evidence
            Record_Date = (text, inline_text, start_inline_text, start)
        ### Create Events ##
        Dictionary = {}
        Dictionary[disease] = {}
        Dictionary[disease]["symptom"] = {}
        Dictionary[disease]["test"] = {}
        Dictionary[disease]["mention"] = {}
        Dictionary[disease]["event"] = {}
        # print(sentences)
        # print(Record_Date)
        try:
            NumIndoc = len(self.doc['root']["TAGS"][disease])
        except:
            # No CAD tags in this note: record medications only.
            #print(Record_Date)
            #self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
            self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, disease)
            return
        for docid in range(NumIndoc):
            # Probe the three possible XML-to-dict shapes (see the sibling
            # Read* methods): b == 0 list-of-docs, 1 single doc, 3 tag list.
            try:
                count = len(self.doc['root']["TAGS"][disease][docid][disease])
                b = 0
            except:
                try:
                    count = len(self.doc['root']["TAGS"][disease][disease])
                    b = 1
                except:
                    count = len(self.doc['root']["TAGS"][disease])
                    b = 3
            for idx in range(count):
                if b == 0:
                    indicator = self.doc['root']["TAGS"][disease][docid][disease][idx]["@indicator"]
                    text = self.doc['root']["TAGS"][disease][docid][disease][idx]["@text"]
                    time = self.doc['root']["TAGS"][disease][docid][disease][idx]["@time"]
                    start = self.doc['root']["TAGS"][disease][docid][disease][idx]["@start"]
                    end = self.doc['root']["TAGS"][disease][docid][disease][idx]["@end"]
                    # NOTE(review): `id` shadows the builtin and is unused.
                    id = self.doc['root']["TAGS"][disease][docid][disease][idx]["@id"]
                elif b == 1:
                    indicator = self.doc['root']["TAGS"][disease][disease][idx]["@indicator"]
                    text = self.doc['root']["TAGS"][disease][disease][idx]["@text"]
                    time = self.doc['root']["TAGS"][disease][disease][idx]["@time"]
                    start = self.doc['root']["TAGS"][disease][disease][idx]["@start"]
                    end = self.doc['root']["TAGS"][disease][disease][idx]["@end"]
                    id = self.doc['root']["TAGS"][disease][disease][idx]["@id"]
                else:
                    indicator = self.doc['root']["TAGS"][disease][idx]["@indicator"]
                    try:
                        text = self.doc['root']["TAGS"][disease][idx]["@text"]
                        # print(self.doc['root']["TAGS"][disease][idx])
                        # NOTE(review): docid-indexed lookups under the b == 3
                        # shape typically raise, skipping the tag -- confirm.
                        time = self.doc['root']["TAGS"][disease][docid][disease][idx]["@time"]
                        start = self.doc['root']["TAGS"][disease][docid][disease][idx]["@start"]
                        end = self.doc['root']["TAGS"][disease][docid][disease][idx]["@end"]
                        id = self.doc['root']["TAGS"][disease][docid][disease][idx]["@id"]
                    except:
                        print("failed")
                        # self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
                        continue
                if indicator == "mention":
                    # rint("mention",text,time)
                    #start = int(start) - 3
                    #end = int(end) - 3
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    # Same line-span mapping as for the record date above.
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    # Evidence key: (tag text, covering line(s), line start
                    # offset, tag start offset); value: distinct @time labels.
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["mention"]:
                        Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)].append(time)
                elif indicator == "event":
                    # print("A1C",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["event"]:
                        Dictionary[disease]["event"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["event"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["event"][(text, inline_text, start_inline_text, start)].append(time)
                elif indicator == "test":
                    # print("glucose",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["test"]:
                        Dictionary[disease]["test"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["test"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["test"][(text, inline_text, start_inline_text, start)].append(time)
                elif indicator == "symptom":
                    # print("glucose",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["symptom"]:
                        Dictionary[disease]["symptom"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["symptom"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["symptom"][(text, inline_text, start_inline_text, start)].append(time)
                else:
                    print(indicator)
                    continue
        self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, disease)
    def ReadDiabetes(self,patient_id):
        """Parse DIABETES annotations for the current note.

        Same pipeline as the sibling ``Read*`` methods: build a per-line
        offset index, extract the record date from the first DATE PHI
        tag, collect "mention", "A1C" and "glucose" evidence spans into
        ``Dictionary["Diabetes"]``, then delegate to
        ``self.ReadMedication`` for ``patient_id``.
        """
        Medications = ["metformin", "insulin", "sulfonylureas", "thiazolidinediones", "GLP-1 agonists",
                       "Meglitinides",
                       "DPP4 inhibitors", "Amylin", "anti-diabetes medications"]
        ## Read Note
        Clinical_Notes = self.doc['root']["TEXT"]
        sentences = Clinical_Notes.split("\n") ## changed delimiter from full stop to newline
        CharPos = 0
        indices = []
        # Line index: (start_char_offset, end_char_offset, line_text).
        for line in sentences:
            indices.append((CharPos, CharPos + len(line), line))
            CharPos = CharPos + 1 + len(line) ### +1 to account for the "\n"
        start = ""
        end = ""
        try:
            Record_Date = ("","","")
            # PHI is normally a list of tags; take the first DATE one.
            for idx in range(len(self.doc['root']["TAGS"]["PHI"])):
                TYPE = self.doc['root']["TAGS"]["PHI"][idx]["@TYPE"]
                if TYPE == "DATE":
                    ## 1st DATE tag is taken as the record date
                    start = self.doc['root']["TAGS"]["PHI"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["PHI"][idx]["@end"]
                    text = self.doc['root']["TAGS"]["PHI"][idx]["@text"]
                    break
                else:
                    continue
        except:
            # NOTE(review): bare except -- presumably PHI was a single dict
            # (one tag) rather than a list; read it directly.
            print(self.doc['root']["TAGS"]["PHI"])
            text = self.doc['root']["TAGS"]["PHI"]["@text"]
            start = self.doc['root']["TAGS"]["PHI"]["@start"]
            end = self.doc['root']["TAGS"]["PHI"]["@end"]
        if start != "" :
            start = int(start)
            end = int(end)
            flag_start = 0
            # Map the [start, end] character span onto whole note lines
            # (single-line hit / span start / span end / middle lines).
            for tup_id in range(len(indices)):
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= indices[tup_id][1]:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    break
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    continue
                if end <= indices[tup_id][1] and flag_start == 1:
                    end_evidence = indices[tup_id][1]
                    inline_text += "\n" + indices[tup_id][2]
                    break
                if flag_start == 1:
                    inline_text += "\n" + indices[tup_id][2]
            start_inline_text = start_evidence
            Record_Date = (text, inline_text, start_inline_text, start)
        ### Create Events ##
        Dictionary = {}
        Dictionary["Diabetes"] = {}
        Dictionary["Diabetes"]["glucose"] = {}
        Dictionary["Diabetes"]["A1C"] = {}
        Dictionary["Diabetes"]["mention"] = {}
        #print(sentences)
        #print(Record_Date)
        try:
            NumIndoc = len(self.doc['root']["TAGS"]["DIABETES"])
        except:
            # No DIABETES tags in this note: record medications only.
            #print(Record_Date)
            #self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
            self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, "Diabetes")
            return
        for docid in range(NumIndoc):
            # Probe the three possible XML-to-dict shapes (see the sibling
            # Read* methods): b == 0 list-of-docs, 1 single doc, 3 tag list.
            try:
                count = len(self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"])
                b = 0
            except:
                try:
                    count = len(self.doc['root']["TAGS"]["DIABETES"]["DIABETES"])
                    b = 1
                except:
                    count = len(self.doc['root']["TAGS"]["DIABETES"])
                    b = 3
            for idx in range(count):
                if b == 0:
                    indicator = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@indicator"]
                    text = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@text"]
                    time = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@time"]
                    start = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@end"]
                    # NOTE(review): `id` shadows the builtin and is unused.
                    id = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@id"]
                elif b == 1:
                    indicator = self.doc['root']["TAGS"]["DIABETES"]["DIABETES"][idx]["@indicator"]
                    text = self.doc['root']["TAGS"]["DIABETES"]["DIABETES"][idx]["@text"]
                    time = self.doc['root']["TAGS"]["DIABETES"]["DIABETES"][idx]["@time"]
                    start = self.doc['root']["TAGS"]["DIABETES"]["DIABETES"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["DIABETES"]["DIABETES"][idx]["@end"]
                    id = self.doc['root']["TAGS"]["DIABETES"]["DIABETES"][idx]["@id"]
                else:
                    indicator = self.doc['root']["TAGS"]["DIABETES"][idx]["@indicator"]
                    try:
                        text = self.doc['root']["TAGS"]["DIABETES"][idx]["@text"]
                        #print(self.doc['root']["TAGS"]["DIABETES"][idx])
                        # NOTE(review): docid-indexed lookups under the b == 3
                        # shape typically raise, skipping the tag -- confirm.
                        time = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@time"]
                        start = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@start"]
                        end = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@end"]
                        id = self.doc['root']["TAGS"]["DIABETES"][docid]["DIABETES"][idx]["@id"]
                    except:
                        print("failed")
                        #self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
                        continue
                if indicator == "mention":
                    #rint("mention",text,time)
                    # start = int(start) - 3
                    # end = int(end) - 3
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    # Same line-span mapping as for the record date above.
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <=indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    # Evidence key: (tag text, covering line(s), line start
                    # offset, tag start offset); value: distinct @time labels.
                    if (text, inline_text, start_inline_text,start) not in Dictionary["Diabetes"]["mention"]:
                        Dictionary["Diabetes"]["mention"][(text, inline_text, start_inline_text,start)] = []
                    if time not in Dictionary["Diabetes"]["mention"][(text, inline_text, start_inline_text,start)]:
                        Dictionary["Diabetes"]["mention"][(text, inline_text, start_inline_text,start)].append(time)
                elif indicator == "A1C":
                    #print("A1C",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text,start) not in Dictionary["Diabetes"]["A1C"]:
                        Dictionary["Diabetes"]["A1C"][(text, inline_text, start_inline_text,start)] = []
                    if time not in Dictionary["Diabetes"]["A1C"][(text, inline_text, start_inline_text,start)]:
                        Dictionary["Diabetes"]["A1C"][(text, inline_text, start_inline_text,start)].append(time)
                elif indicator == "glucose":
                    #print("glucose",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text,start) not in Dictionary["Diabetes"]["glucose"]:
                        Dictionary["Diabetes"]["glucose"][(text, inline_text, start_inline_text,start)] = []
                    if time not in Dictionary["Diabetes"]["glucose"][(text, inline_text, start_inline_text,start)]:
                        Dictionary["Diabetes"]["glucose"][(text, inline_text, start_inline_text,start)].append(time)
                else:
                    print(indicator)
                    continue
        self.ReadMedication(patient_id,indices,Clinical_Notes,Record_Date,Dictionary, Medications, "Diabetes")
    def ReadHyperlipedimia(self, patient_id):
        """Parse HYPERLIPIDEMIA annotations for the current note.

        Same pipeline as the sibling ``Read*`` methods: build a per-line
        offset index, extract the record date from the first DATE PHI
        tag, collect "mention", "high chol." and "high LDL" evidence
        spans into ``Dictionary``, then delegate to
        ``self.ReadMedication`` for ``patient_id``.
        """
        disease = "HYPERLIPIDEMIA"
        Medications = [ "statin", "ezetimibe", "niacin", "fibrate"]
        # ## Read Note
        Clinical_Notes = self.doc['root']["TEXT"]
        sentences = Clinical_Notes.split("\n") ## changed delimiter from full stop to newline
        CharPos = 0
        indices = []
        # Line index: (start_char_offset, end_char_offset, line_text).
        for line in sentences:
            indices.append((CharPos, CharPos + len(line), line))
            CharPos = CharPos + 1 + len(line) ### +1 to account for the "\n"
        start = ""
        end = ""
        try:
            Record_Date = ("","","")
            # PHI is normally a list of tags; take the first DATE one.
            for idx in range(len(self.doc['root']["TAGS"]["PHI"])):
                TYPE = self.doc['root']["TAGS"]["PHI"][idx]["@TYPE"]
                if TYPE == "DATE":
                    ## 1st DATE tag is taken as the record date
                    start = self.doc['root']["TAGS"]["PHI"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["PHI"][idx]["@end"]
                    text = self.doc['root']["TAGS"]["PHI"][idx]["@text"]
                    break
                else:
                    continue
        except:
            # NOTE(review): bare except -- presumably PHI was a single dict
            # (one tag) rather than a list; read it directly.
            print(self.doc['root']["TAGS"]["PHI"])
            text = self.doc['root']["TAGS"]["PHI"]["@text"]
            start = self.doc['root']["TAGS"]["PHI"]["@start"]
            end = self.doc['root']["TAGS"]["PHI"]["@end"]
        if start != "" :
            start = int(start)
            end = int(end)
            flag_start = 0
            # Map the [start, end] character span onto whole note lines
            # (single-line hit / span start / span end / middle lines).
            for tup_id in range(len(indices)):
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                        indices[tup_id][1]:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    break
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    continue
                if end <= indices[tup_id][1] and flag_start == 1:
                    end_evidence = indices[tup_id][1]
                    inline_text += "\n" + indices[tup_id][2]
                    break
                if flag_start == 1:
                    inline_text += "\n" + indices[tup_id][2]
            start_inline_text = start_evidence
            Record_Date = (text, inline_text, start_inline_text, start)
        ### Create Events ##
        Dictionary = {}
        Dictionary[disease] = {}
        Dictionary[disease]["high chol."] = {}
        Dictionary[disease]["high LDL"] = {}
        Dictionary[disease]["mention"] = {}
        try:
            NumIndoc = len(self.doc['root']["TAGS"][disease])
        except:
            # No HYPERLIPIDEMIA tags in this note: record medications only.
            # print(Record_Date)
            # self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
            self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, disease)
            return
        for docid in range(NumIndoc):
            # Probe the three possible XML-to-dict shapes (see the sibling
            # Read* methods): b == 0 list-of-docs, 1 single doc, 3 tag list.
            try:
                count = len(self.doc['root']["TAGS"][disease][docid][disease])
                b = 0
            except:
                try:
                    count = len(self.doc['root']["TAGS"][disease][disease])
                    b = 1
                except:
                    count = len(self.doc['root']["TAGS"][disease])
                    b = 3
            for idx in range(count):
                if b == 0:
                    indicator = self.doc['root']["TAGS"][disease][docid][disease][idx]["@indicator"]
                    text = self.doc['root']["TAGS"][disease][docid][disease][idx]["@text"]
                    time = self.doc['root']["TAGS"][disease][docid][disease][idx]["@time"]
                    start = self.doc['root']["TAGS"][disease][docid][disease][idx]["@start"]
                    end = self.doc['root']["TAGS"][disease][docid][disease][idx]["@end"]
                    # NOTE(review): `id` shadows the builtin and is unused.
                    id = self.doc['root']["TAGS"][disease][docid][disease][idx]["@id"]
                elif b == 1:
                    indicator = self.doc['root']["TAGS"][disease][disease][idx]["@indicator"]
                    text = self.doc['root']["TAGS"][disease][disease][idx]["@text"]
                    time = self.doc['root']["TAGS"][disease][disease][idx]["@time"]
                    start = self.doc['root']["TAGS"][disease][disease][idx]["@start"]
                    end = self.doc['root']["TAGS"][disease][disease][idx]["@end"]
                    id = self.doc['root']["TAGS"][disease][disease][idx]["@id"]
                else:
                    indicator = self.doc['root']["TAGS"][disease][idx]["@indicator"]
                    try:
                        text = self.doc['root']["TAGS"][disease][idx]["@text"]
                        # print(self.doc['root']["TAGS"][disease][idx])
                        # NOTE(review): docid-indexed lookups under the b == 3
                        # shape typically raise, skipping the tag -- confirm.
                        time = self.doc['root']["TAGS"][disease][docid][disease][idx]["@time"]
                        start = self.doc['root']["TAGS"][disease][docid][disease][idx]["@start"]
                        end = self.doc['root']["TAGS"][disease][docid][disease][idx]["@end"]
                        id = self.doc['root']["TAGS"][disease][docid][disease][idx]["@id"]
                    except:
                        print("failed")
                        # self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
                        continue
                if indicator == "mention":
                    # rint("mention",text,time)
                    # start = int(start) - 3
                    # end = int(end) - 3
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    # Same line-span mapping as for the record date above.
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    # Evidence key: (tag text, covering line(s), line start
                    # offset, tag start offset); value: distinct @time labels.
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["mention"]:
                        Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)].append(time)
                elif indicator == "high chol.":
                    # print("A1C",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["high chol."]:
                        Dictionary[disease]["high chol."][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["high chol."][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["high chol."][(text, inline_text, start_inline_text, start)].append(time)
                elif indicator == "high LDL":
                    # print("glucose",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["high LDL"]:
                        Dictionary[disease]["high LDL"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["high LDL"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["high LDL"][(text, inline_text, start_inline_text, start)].append(time)
                else:
                    print(indicator)
                    continue
        self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, disease)
    def ReadObesity(self,patient_id):
        """Parse OBESE annotations for the current note.

        Same pipeline as the sibling ``Read*`` methods: build a per-line
        offset index, extract the record date from the first DATE PHI
        tag, collect "mention" and "BMI" evidence spans into
        ``Dictionary``, then delegate to ``self.ReadMedication`` for
        ``patient_id``.  Note the medication list is empty for obesity.
        """
        disease = "OBESE"
        Medications = []
        ## Read Note
        Clinical_Notes = self.doc['root']["TEXT"]
        sentences = Clinical_Notes.split("\n") ## changed delimiter from full stop to newline
        CharPos = 0
        indices = []
        # Line index: (start_char_offset, end_char_offset, line_text).
        for line in sentences:
            indices.append((CharPos, CharPos + len(line), line))
            CharPos = CharPos + 1 + len(line) ### +1 to account for the "\n"
        start = ""
        end = ""
        try:
            Record_Date = ("","","")
            # PHI is normally a list of tags; take the first DATE one.
            for idx in range(len(self.doc['root']["TAGS"]["PHI"])):
                TYPE = self.doc['root']["TAGS"]["PHI"][idx]["@TYPE"]
                if TYPE == "DATE":
                    ## 1st DATE tag is taken as the record date
                    start = self.doc['root']["TAGS"]["PHI"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["PHI"][idx]["@end"]
                    text = self.doc['root']["TAGS"]["PHI"][idx]["@text"]
                    break
                else:
                    continue
        except:
            # NOTE(review): bare except -- presumably PHI was a single dict
            # (one tag) rather than a list; read it directly.
            print(self.doc['root']["TAGS"]["PHI"])
            text = self.doc['root']["TAGS"]["PHI"]["@text"]
            start = self.doc['root']["TAGS"]["PHI"]["@start"]
            end = self.doc['root']["TAGS"]["PHI"]["@end"]
        if start != "" :
            start = int(start)
            end = int(end)
            flag_start = 0
            # Map the [start, end] character span onto whole note lines
            # (single-line hit / span start / span end / middle lines).
            for tup_id in range(len(indices)):
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= indices[tup_id][1]:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    break
                if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                    start_evidence = indices[tup_id][0]
                    flag_start = 1
                    inline_text = indices[tup_id][2]
                    continue
                if end <= indices[tup_id][1] and flag_start == 1:
                    end_evidence = indices[tup_id][1]
                    inline_text += "\n" + indices[tup_id][2]
                    break
                if flag_start == 1:
                    inline_text += "\n" + indices[tup_id][2]
            start_inline_text = start_evidence
            Record_Date = (text, inline_text, start_inline_text, start)
        ### Create Events ##
        Dictionary = {}
        Dictionary[disease] = {}
        Dictionary[disease]["BMI"] = {}
        Dictionary[disease]["mention"] = {}
        try:
            NumIndoc = len(self.doc['root']["TAGS"][disease])
        except:
            # No OBESE tags in this note: record medications only.
            # print(Record_Date)
            # self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
            self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, disease)
            return
        for docid in range(NumIndoc):
            # Probe the three possible XML-to-dict shapes (see the sibling
            # Read* methods): b == 0 list-of-docs, 1 single doc, 3 tag list.
            try:
                count = len(self.doc['root']["TAGS"][disease][docid][disease])
                b = 0
            except:
                try:
                    count = len(self.doc['root']["TAGS"][disease][disease])
                    b = 1
                except:
                    count = len(self.doc['root']["TAGS"][disease])
                    b = 3
            for idx in range(count):
                if b == 0:
                    indicator = self.doc['root']["TAGS"][disease][docid][disease][idx]["@indicator"]
                    text = self.doc['root']["TAGS"][disease][docid][disease][idx]["@text"]
                    time = self.doc['root']["TAGS"][disease][docid][disease][idx]["@time"]
                    start = self.doc['root']["TAGS"][disease][docid][disease][idx]["@start"]
                    end = self.doc['root']["TAGS"][disease][docid][disease][idx]["@end"]
                    # NOTE(review): `id` shadows the builtin and is unused.
                    id = self.doc['root']["TAGS"][disease][docid][disease][idx]["@id"]
                elif b == 1:
                    indicator = self.doc['root']["TAGS"][disease][disease][idx]["@indicator"]
                    text = self.doc['root']["TAGS"][disease][disease][idx]["@text"]
                    time = self.doc['root']["TAGS"][disease][disease][idx]["@time"]
                    start = self.doc['root']["TAGS"][disease][disease][idx]["@start"]
                    end = self.doc['root']["TAGS"][disease][disease][idx]["@end"]
                    id = self.doc['root']["TAGS"][disease][disease][idx]["@id"]
                else:
                    indicator = self.doc['root']["TAGS"][disease][idx]["@indicator"]
                    try:
                        text = self.doc['root']["TAGS"][disease][idx]["@text"]
                        # print(self.doc['root']["TAGS"][disease][idx])
                        # NOTE(review): docid-indexed lookups under the b == 3
                        # shape typically raise, skipping the tag -- confirm.
                        time = self.doc['root']["TAGS"][disease][docid][disease][idx]["@time"]
                        start = self.doc['root']["TAGS"][disease][docid][disease][idx]["@start"]
                        end = self.doc['root']["TAGS"][disease][docid][disease][idx]["@end"]
                        id = self.doc['root']["TAGS"][disease][docid][disease][idx]["@id"]
                    except:
                        print("failed")
                        # self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
                        continue
                if indicator == "mention":
                    # rint("mention",text,time)
                    # start = int(start) - 3
                    # end = int(end) - 3
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    # Same line-span mapping as for the record date above.
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    # Evidence key: (tag text, covering line(s), line start
                    # offset, tag start offset); value: distinct @time labels.
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["mention"]:
                        Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["mention"][(text, inline_text, start_inline_text, start)].append(time)
                elif indicator == "BMI":
                    # print("A1C",text,time)
                    start = int(start)
                    end = int(end)
                    flag_start = 0
                    for tup_id in range(len(indices)):
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= \
                                indices[tup_id][1]:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            break
                        if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                            start_evidence = indices[tup_id][0]
                            flag_start = 1
                            inline_text = indices[tup_id][2]
                            continue
                        if end <= indices[tup_id][1] and flag_start == 1:
                            end_evidence = indices[tup_id][1]
                            inline_text += "\n" + indices[tup_id][2]
                            break
                        if flag_start == 1:
                            inline_text += "\n" + indices[tup_id][2]
                    start_inline_text = start_evidence
                    if (text, inline_text, start_inline_text, start) not in Dictionary[disease]["BMI"]:
                        Dictionary[disease]["BMI"][(text, inline_text, start_inline_text, start)] = []
                    if time not in Dictionary[disease]["BMI"][(text, inline_text, start_inline_text, start)]:
                        Dictionary[disease]["BMI"][(text, inline_text, start_inline_text, start)].append(time)
                else:
                    print(indicator)
                    continue
        self.ReadMedication(patient_id, indices, Clinical_Notes, Record_Date, Dictionary, Medications, disease)
def ReadMedication(self,patient_id,indices,Clinical_Notes,Record_Date,Dictionary,Medications,disease):
        """Collect MEDICATION tags for the current document into Dictionary[disease][<type>].

        Each entry maps (text, evidence lines, evidence line start, mention start)
        -> list of time attributes.  Always finishes by appending
        (Clinical_Notes, Record_Date, Dictionary) to self.Patients[patient_id].
        NOTE(review): `indices` appears to be a list of (char_start, char_end,
        line_text) spans over the note -- confirm against the caller.
        """
        for med in Medications:
            Dictionary[disease][med] = {}
        try:
            NumIndoc = len(self.doc['root']["TAGS"]["MEDICATION"])
        except:
            # No MEDICATION section in this record: store what we have and stop.
            self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
            return
        for docid in range(NumIndoc):
            # xmltodict collapses single elements, so the MEDICATION tags can be
            # nested three different ways; b remembers which shape applied.
            try:
                count = len(self.doc['root']["TAGS"]["MEDICATION"][docid]["MEDICATION"])
                b = 0
            except:
                try:
                    count = len(self.doc['root']["TAGS"]["MEDICATION"]["MEDICATION"])
                    b = 1
                except:
                    count = len(self.doc['root']["TAGS"]["MEDICATION"])
                    b = 3
            for idx in range(count):
                if b == 0:
                    indicator = self.doc['root']["TAGS"]["MEDICATION"][docid]["MEDICATION"][idx]["@type1"]
                    indicator2 = self.doc['root']["TAGS"]["MEDICATION"][docid]["MEDICATION"][idx]["@type2"]
                    text = self.doc['root']["TAGS"]["MEDICATION"][docid]["MEDICATION"][idx]["@text"]
                    time = self.doc['root']["TAGS"]["MEDICATION"][docid]["MEDICATION"][idx]["@time"]
                    start = self.doc['root']["TAGS"]["MEDICATION"][docid]["MEDICATION"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["MEDICATION"][docid]["MEDICATION"][idx]["@end"]
                elif b == 1:
                    indicator = self.doc['root']["TAGS"]["MEDICATION"]["MEDICATION"][idx]["@type1"]
                    indicator2 = self.doc['root']["TAGS"]["MEDICATION"]["MEDICATION"][idx]["@type2"]
                    text = self.doc['root']["TAGS"]["MEDICATION"]["MEDICATION"][idx]["@text"]
                    time = self.doc['root']["TAGS"]["MEDICATION"]["MEDICATION"][idx]["@time"]
                    start = self.doc['root']["TAGS"]["MEDICATION"]["MEDICATION"][idx]["@start"]
                    end = self.doc['root']["TAGS"]["MEDICATION"]["MEDICATION"][idx]["@end"]
                else:
                    indicator = self.doc['root']["TAGS"]["MEDICATION"][idx]["@type1"]
                    indicator2 = self.doc['root']["TAGS"]["MEDICATION"][idx]["@type2"]
                    try:
                        text = self.doc['root']["TAGS"]["MEDICATION"][idx]["@text"]
                        time = self.doc['root']["TAGS"]["MEDICATION"][idx]["@time"]
                        start = self.doc['root']["TAGS"]["MEDICATION"][idx]["@start"]
                        end = self.doc['root']["TAGS"]["MEDICATION"][idx]["@end"]
                    except:
                        continue
                #print(indicator,indicator2)
                if indicator not in self.types:
                    self.types.append(indicator)
                if indicator2 not in self.types:
                    self.types.append(indicator2)
                start = int(start)
                end = int(end)
                flag_start = 0
                # Map the character span [start, end] onto the covering note line(s).
                for tup_id in range(len(indices)):
                    if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0 and end <= indices[tup_id][1]:
                        # Span fits entirely inside one line.
                        start_evidence = indices[tup_id][0]
                        flag_start = 1
                        inline_text = indices[tup_id][2]
                        break
                    if start >= indices[tup_id][0] and start <= indices[tup_id][1] and flag_start == 0:
                        # Span starts on this line but continues on later ones.
                        start_evidence = indices[tup_id][0]
                        flag_start = 1
                        inline_text = indices[tup_id][2]
                        continue
                    if end <= indices[tup_id][1] and flag_start == 1:
                        # Last line of a multi-line span.
                        end_evidence = indices[tup_id][1]
                        inline_text += "\n" + indices[tup_id][2]
                        break
                    if flag_start == 1:
                        # Intermediate line of a multi-line span.
                        inline_text += "\n" + indices[tup_id][2]
                start_inline_text = start_evidence
                if len(text.split(" ")) <= 1 and num_there(text) == False: ## Some are noisy remove them
                    # Only single-word, digit-free mentions are kept; record the
                    # annotation under both type attributes when relevant.
                    if text not in self.list_medications:
                        self.list_medications.append(text)
                    if indicator in Medications:
                        if (text, inline_text, start_inline_text,start) not in Dictionary[disease][indicator]:
                            Dictionary[disease][indicator][(text, inline_text, start_inline_text,start)] = []
                        if time not in Dictionary[disease][indicator][(text, inline_text, start_inline_text,start)]:
                            Dictionary[disease][indicator][(text, inline_text, start_inline_text,start)].append(time)
                    if indicator2 in Medications:
                        if (text, inline_text, start_inline_text,start) not in Dictionary[disease][indicator2]:
                            Dictionary[disease][indicator2][(text, inline_text, start_inline_text,start)] = []
                        if time not in Dictionary[disease][indicator2][(text, inline_text, start_inline_text,start)]:
                            Dictionary[disease][indicator2][(text, inline_text, start_inline_text,start)].append(time)
        self.Patients[patient_id].append((Clinical_Notes, Record_Date, Dictionary))
############################## Main Functions ###########################################################
def ReadTemplates(self):
        """Instantiate every risk-challenge template against each annotated note.

        Produces three outputs: a QA tsv (self.filewriter), a question /
        logical-form tsv (self.filewriter_forlform) and a SQuAD-style json dump.
        Fix: the original dropped/overwrote its file handles and never closed
        them; named handles are now kept and closed once generation finishes.
        """
        self.logical_out = []
        ### File to write Question-Answers ##
        qa_file = open(qa_output, "w")
        self.filewriter = csv.writer(qa_file, delimiter="\t")
        self.filewriter.writerow(
            ["Question", "Logical Form", "Answer", "Answer line in note", "Note ID"])
        ### File to write Question-Logical Forms ##
        ql_file = open(ql_output, "w")
        self.filewriter_forlform = csv.writer(ql_file, delimiter="\t")
        self.filewriter_forlform.writerow(["Question", "Logical Form"])
        self.relations_out = {"paragraphs": [], "title": "risk-dataset"}
        ### File to read templates ###
        with open(template_file_path) as template_file:
            filereader = list(csv.reader(template_file))
        ## read only templates relevant to heart disease risk challenge ##
        risk_lines = [line for line in filereader[1:] if line[0] == "risk"]
        total_questions = 0
        for Noteid in self.RiskAnnotationsPerNote:
            [PatientNotes, RecordDates, Disease_note] = self.RiskAnnotationsPerNote[Noteid]
            # Concatenate the individual notes; offset_notes[i] is the character
            # offset of note i inside the concatenation (each note ends with "\n").
            PatientNote = ""
            print(len(PatientNotes))
            for note in PatientNotes:
                PatientNote += note + "\n"
            offset_notes = [0]
            for note in PatientNotes[0:-1]:
                offset_notes.append(len(note) + 1 + offset_notes[-1])
            out_patient = {"note_id": Noteid, "context": PatientNote.split("\n"), "qas": []}
            self.unique_questions = []
            for line in risk_lines:
                question = line[2].strip()
                answertype = line[4]
                logical_form = line[3].strip()
                question = question.replace("\t", "")
                logical_form = logical_form.replace("\t", "")
                # Collapse noisy duplicated placeholders from the template sheet.
                question = question.replace("|medication| or |medication|", "|medication|") ## added ##
                question = question.replace("|problem| or |problem|", "|problem|") ## added ##
                question = question.replace("|test| or |test|", "|test|") ## added ##
                question = question.replace("|test| |test| |test|", "|test|") ## added ##
                if question.strip() == "":
                    continue
                types_to_replace = self.checking_for_errors(question, logical_form)
                if len(types_to_replace) != 0:
                    types_to_replace = list(types_to_replace[0])
                else:
                    types_to_replace = []
                answer_out = self.MakeRiskQLA(PatientNote, question, answertype, logical_form, Disease_note, RecordDates, Noteid, types_to_replace, offset_notes)
                if len(answer_out) != 0:
                    out_patient["qas"].extend(answer_out)
            total_questions += len(self.unique_questions)
            self.relations_out["paragraphs"].append(out_patient)
        with open(risk_qa_output_json, 'w') as outfile:
            json.dump(self.relations_out, outfile, ensure_ascii=False)
        # All template instantiation above is synchronous, so the tsv handles
        # can be released now.
        qa_file.close()
        ql_file.close()
def MakeRiskQLA(self, PatientNote, question, answertype, logical_form, Disease_time_progression, Record_dates, Noteid, types_to_replace, offset_notes):
        """Build SQuAD-style QA entries for one template applied to one note.

        Returns a list of {"answers", "id", "question"} dicts; question /
        logical-form rows are written to self.filewriter_forlform as a side
        effect.  Template instantiations with no answers are only logged.
        Fix: the original indexed `zip(*...)` directly, which is Python-2-only
        (zip returns an iterator on Python 3); replaced with comprehensions.
        """
        answer_out = []
        question_list = question.strip().split("##")
        logical_form_orginal = logical_form
        QLA = self.MakeAnswers(answertype, types_to_replace, question_list, logical_form, Disease_time_progression, Record_dates, Noteid, offset_notes)
        if len(QLA) == 0:
            return []
        # Each QLA tuple: (paraphrase (question, template) pairs, logical form, answers, ...)
        for values in QLA:
            paraphrases = values[0]
            # De-duplicate the (question, template) pairs before writing them out.
            for pair in set(paraphrases):
                self.filewriter_forlform.writerow([pair[0]] + [values[1]] + [pair[1]] + [logical_form_orginal])
            if len(values[2]) == 0:
                continue
            # Portable replacement for the Python-2-only zip(*paraphrases)[0].
            question_texts = [pair[0] for pair in paraphrases]
            if set(question_texts) not in self.unique_questions:
                self.unique_questions.append(set(question_texts))
            ans_list = []
            for (text, inline_text, start_inline_text, start) in values[2]:
                entity_type = "single"
                val = {"answer_start": [start_inline_text, start], "text": text, "evidence": inline_text, "evidence_start": start_inline_text, "answer_entity_type": entity_type}
                if val not in ans_list:
                    ans_list.append(val)  # evidence will have q_line_answer_line
            answer_temp = {"answers": ans_list, "id": [values[0], logical_form_orginal], "question": question_texts}
            answer_out.append(answer_temp)
        return answer_out
######################## Main Utility Functions ########################################################
def MakeAnswers(self,answertype,types_to_replace,question_list,logical_form, Disease_time_progression, Record_dates,Noteid,offset_notes):
        """Instantiate one template, dispatching on `answertype`.

        Returns a list of QLA tuples (question_paraphrase_pairs, logical_form,
        answer_annotations, non_unique_questions).  For answertype == "none"
        only question / logical-form rows are written and [] is returned.
        NOTE(review): answer spans are shifted by note_offset so they index into
        the concatenated note -- confirm offsets against the caller's layout.
        NOTE(review): several branches reuse `annotations` / `time` after the
        inner `for annotations in ...` loop, i.e. only the LAST annotation's
        values -- looks suspicious but is preserved as-is.
        """
        QLA = []
        non_uniq = []
        logical_form_orginal = logical_form
        if answertype == "none":
            # No extractable answer: fill the placeholders with every candidate
            # value and emit only question / logical-form rows.
            annotations = self.InputMapping(types_to_replace,question_list,logical_form )
            ################# Generate only Question Logical Forms ##################################
            for value in annotations:
                #print(value)
                logical_form_template = logical_form
                new_question_list = []
                paraphrase_questions = []
                for question in question_list:
                    done = []
                    idx = 0
                    for types in list(types_to_replace):
                        # temp = qwords
                        index = question.find("|" + types + "|")
                        if index == -1 and types not in done:
                            # |treatment| and |medication| are interchangeable;
                            # retry after normalizing before complaining.
                            if types == "medication":
                                question = question.replace("|treatment|","|medication|")
                                index = question.find("|" + "medication" + "|")
                                if index == -1 and types not in done:
                                    print(question, "|" + types + "|", done)
                            else:
                                print(question, "|" + types + "|", done)
                        question = question.replace("|" + types + "|", value[idx])
                        done.append(types)
                        idx += 1
                    paraphrase_questions.append(question)
                    #print(question)
                    if question not in new_question_list:
                        new_question_list.append(question)
                idx = 0
                done = []
                # Fill the same placeholder values into the logical form.
                for types in list(types_to_replace):
                    index = logical_form_template.find("|" + types + "|")
                    if index == -1 and types not in done:
                        print(logical_form_template, "|" + types + "|", done, types)
                    done.append(types)
                    logical_form_template = logical_form_template.replace("|" + types + "|", value[idx])
                    idx += 1
                #print(logical_form_template)
                unique_tup = list(set(zip(paraphrase_questions, question_list)))
                for qidx in range(len(unique_tup)):
                    #print(paraphrase_questions[0],logical_form_template)
                    self.filewriter_forlform.writerow([unique_tup[qidx][0]] + [logical_form_template] + [unique_tup[qidx][1]] + [logical_form_orginal])
            return QLA
        # Answers: dates on which each test was recorded during DCT.
        elif answertype == "result_date":
            for (on_date_disease,record_date,note_offset) in zip(Disease_time_progression,Record_dates,offset_notes):
                for Diseases in on_date_disease: ## Diseases has a list of Diseases keys
                    inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                    #print(inidcators)
                    test_mentions = disease_test[Diseases]
                    for test in test_mentions: ## on "high bp...
                        #print(test)
                        time = []
                        for annotations in inidcators[test]:
                            time = inidcators[test][annotations][0]
                        test_name = dictionary[test][1]
                        disease_name = dictionary[test][0].lower()
                        logical_form_template = logical_form
                        logical_form_template = logical_form_template.replace("|test|", test_name)
                        logical_form_template = logical_form_template.replace("|date|", record_date[0])
                        answers = []
                        question_paraphrases = []
                        for question in question_list:
                            orginal = question
                            question = question.replace("|test|", test_name)
                            question = question.replace("|date|", record_date[0])
                            if (question, orginal) not in question_paraphrases:
                                question_paraphrases.append((question, orginal))
                                non_uniq.append(question)
                        if "during DCT" in time:
                            annotations = annotations[0:-2] + (annotations[-2]+note_offset, annotations[-1]+note_offset)
                            answers.append(annotations)
                            #print(annotations)
                        QLA.append((question_paraphrases,logical_form_template,answers,non_uniq))
            #for value in test_annotations:
            #    test_annotations[]
        # Answers: test results over the span of the record dates; |time| is the
        # coarsest unit (years > months > days) that differs across dates.
        elif answertype == "result_value_time":
            year = []
            month = []
            day = []
            for date in Record_dates:
                try:
                    values = date[0].split("-")
                    if int(values[0]) not in year:
                        year.append(int(values[0]))
                    if int(values[1]) not in month:
                        month.append(int(values[1]))
                    if int(values[2]) not in day:
                        day.append(int(values[2]))
                except:
                    # Fallback for dd/mm/yyyy formatted dates.
                    values = date[0].split("/")
                    if int(values[2]) not in year:
                        year.append(int(values[2]))
                    if int(values[1]) not in month:
                        month.append(int(values[1]))
                    if int(values[0]) not in day:
                        day.append(int(values[0]))
            if len(year) > 1:
                time_val = str(max(year)-min(year)) + " years"
            elif len(month) > 1:
                time_val = str(max(month)-min(month)) + " months"
            else:
                time_val = str(max(day)-min(day)) + " days"
            for key in disease_test:
                Diseases = key
                test_mentions = disease_test[Diseases]
                for test in test_mentions: ## on "high bp...
                    test_name = dictionary[test][1]
                    logical_form_template = logical_form
                    logical_form_template = logical_form_template.replace("|test|", test_name)
                    logical_form_template = logical_form_template.replace("|time|", time_val)
                    logical_form_template = logical_form_template.replace("|value|", test_value[test_name])
                    answers = []
                    question_paraphrases = []
                    for question in question_list:
                        orginal = question
                        question = question.replace("|test|", test_name)
                        question = question.replace("|time|", time_val)
                        question = question.replace("|value|", test_value[test_name])
                        if (question, orginal) not in question_paraphrases:
                            question_paraphrases.append((question, orginal))
                            non_uniq.append(question)
                    for (on_date_disease,note_offset) in zip(Disease_time_progression,offset_notes):
                        Diseases = key
                        inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                        time = []
                        for annotations in inidcators[test]:
                            time = inidcators[test][annotations][0]
                            if "before DCT" in time or "during DCT" in time:
                                annotations = annotations[0:-2] + (annotations[-2] + note_offset, annotations[-1] + note_offset)
                                answers.append(annotations)
                                #print(annotations)
                    QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
            # for value in test_annotations:
            #    test_annotations[]
        # Answers: every before/during-DCT result annotation for each test.
        elif answertype == "results":
            for key in disease_test:
                Diseases = key
                test_mentions = disease_test[Diseases]
                for test in test_mentions: ## on "high bp...
                    test_name = dictionary[test][1]
                    logical_form_template = logical_form
                    logical_form_template = logical_form_template.replace("|test|", test_name)
                    answers = []
                    question_paraphrases = []
                    for question in question_list:
                        orginal = question
                        question = question.replace("|test|", test_name)
                        if (question, orginal) not in question_paraphrases:
                            question_paraphrases.append((question, orginal))
                            non_uniq.append(question)
                    for (on_date_disease,note_offset) in zip(Disease_time_progression,offset_notes):
                        Diseases = key
                        inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                        time = []
                        for annotations in inidcators[test]:
                            time = inidcators[test][annotations][0]
                            if "before DCT" in time or "during DCT" in time:
                                annotations = annotations[0:-2] + (annotations[-2] + note_offset, annotations[-1] + note_offset)
                                answers.append(annotations)
                    QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
        # Same as "results" but the template also names the problem.
        elif answertype == "test_problem":
            for key in disease_test:
                Diseases = key
                test_mentions = disease_test[Diseases]
                for test in test_mentions: ## on "high bp...
                    test_name = dictionary[test][1]
                    logical_form_template = logical_form
                    logical_form_template = logical_form_template.replace("|test|", test_name)
                    logical_form_template = logical_form_template.replace("|problem|",Diseases)
                    answers = []
                    question_paraphrases = []
                    for question in question_list:
                        orginal = question
                        question = question.replace("|test|", test_name)
                        question = question.replace("|problem|", Diseases)
                        if (question, orginal) not in question_paraphrases:
                            question_paraphrases.append((question, orginal))
                            non_uniq.append(question)
                    for (on_date_disease, note_offset) in zip(Disease_time_progression, offset_notes):
                        Diseases = key
                        inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                        time = []
                        for annotations in inidcators[test]:
                            time = inidcators[test][annotations][0]
                            if "before DCT" in time or "during DCT" in time:
                                annotations = annotations[0:-2] + (annotations[-2] + note_offset, annotations[-1] + note_offset)
                                answers.append(annotations)
                    QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
        # Same as "results" but only the problem placeholder is filled.
        elif answertype == "problem_result":
            for key in disease_test:
                Diseases = key
                test_mentions = disease_test[Diseases]
                for test in test_mentions: ## on "high bp...
                    test_name = dictionary[test][1]
                    logical_form_template = logical_form
                    logical_form_template = logical_form_template.replace("|problem|",Diseases)
                    answers = []
                    question_paraphrases = []
                    for question in question_list:
                        orginal = question
                        question = question.replace("|problem|", Diseases)
                        if (question, orginal) not in question_paraphrases:
                            question_paraphrases.append((question, orginal))
                            non_uniq.append(question)
                    for (on_date_disease, note_offset) in zip(Disease_time_progression, offset_notes):
                        Diseases = key
                        inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                        time = []
                        for annotations in inidcators[test]:
                            time = inidcators[test][annotations][0]
                            if "before DCT" in time or "during DCT" in time:
                                annotations = annotations[0:-2] + (annotations[-2] + note_offset, annotations[-1] + note_offset)
                                answers.append(annotations)
                    QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
        # Answers: the record date itself for tests observed during DCT.
        elif answertype == "test_date":
            for key in disease_test:
                Diseases = key
                test_mentions = disease_test[Diseases]
                for test in test_mentions: ## on "high bp...
                    test_name = dictionary[test][1]
                    logical_form_template = logical_form
                    logical_form_template = logical_form_template.replace("|test|", test_name)
                    answers = []
                    question_paraphrases = []
                    for question in question_list:
                        orginal = question
                        question = question.replace("|test|", test_name)
                        if (question, orginal) not in question_paraphrases:
                            question_paraphrases.append((question, orginal))
                            non_uniq.append(question)
                    for (on_date_disease,record_date,note_offset) in zip(Disease_time_progression,Record_dates,offset_notes):
                        Diseases = key
                        inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                        time = []
                        for annotations in inidcators[test]:
                            time = inidcators[test][annotations][0]
                        if "during DCT" in time:
                            record_date = record_date[0:-2] + (record_date[-2] + note_offset,record_date[-1]+note_offset)
                            answers.append(record_date)
                            #print(annotations)
                    QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
        # Answers: all during-DCT annotations for every test, no placeholders.
        elif answertype == "results_all":
            for key in disease_test:
                Diseases = key
                test_mentions = disease_test[Diseases]
                for test in test_mentions: ## on "high bp...
                    test_name = dictionary[test][1]
                    answers = []
                    question_paraphrases = []
                    for question in question_list:
                        orginal = question
                        if (question, orginal) not in question_paraphrases:
                            question_paraphrases.append((question, orginal))
                            non_uniq.append(question)
                    for (on_date_disease, note_offset) in zip(Disease_time_progression, offset_notes):
                        Diseases = key
                        inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                        time = []
                        for annotations in inidcators[test]:
                            time = inidcators[test][annotations][0]
                            if "during DCT" in time:
                                annotations = annotations[0:-2] + (annotations[-2] + note_offset, annotations[-1] + note_offset)
                                answers.append(annotations)
                                #print(annotations)
                    QLA.append((question_paraphrases, logical_form, answers,non_uniq))
        # NOTE(review): duplicate of the "test_date" branch above -- this elif
        # can never be reached; preserved as-is (dead code).
        elif answertype == "test_date":
            for key in disease_test:
                Diseases = key
                test_mentions = disease_test[Diseases]
                for test in test_mentions: ## on "high bp...
                    test_name = dictionary[test][1]
                    logical_form_template = logical_form
                    logical_form_template = logical_form_template.replace("|test|", test_name)
                    answers = []
                    question_paraphrases = []
                    for question in question_list:
                        orginal = question
                        question = question.replace("|test|", test_name)
                        if (question, orginal) not in question_paraphrases:
                            question_paraphrases.append((question, orginal))
                            non_uniq.append(question)
                    for (on_date_disease, record_date, note_offset) in zip(Disease_time_progression, Record_dates,
                                                                           offset_notes):
                        Diseases = key
                        inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                        time = []
                        for annotations in inidcators[test]:
                            time = inidcators[test][annotations][0]
                        if "during DCT" in time:
                            record_date = record_date[0:-2] + (record_date[-2] + note_offset,record_date[-1]+note_offset)
                            answers.append(record_date)
                            #print(annotations)
                    QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
        # Answers: record dates on which the disease was mentioned ONLY during DCT.
        elif answertype == "disease_date":
            for key in disease_test:
                Diseases = key
                logical_form_template = logical_form
                logical_form_template = logical_form_template.replace("|problem|", key.lower())
                answers = []
                question_paraphrases = []
                for question in question_list:
                    orginal = question
                    question = question.replace("|problem|", key.lower())
                    if (question, orginal) not in question_paraphrases:
                        question_paraphrases.append((question, orginal))
                        non_uniq.append(question)
                for (on_date_disease, record_date, note_offset) in zip(Disease_time_progression, Record_dates,offset_notes):
                    Diseases = key
                    inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                    time = []
                    for annotations in inidcators["mention"]:
                        time = inidcators["mention"][annotations][0]
                    if "during DCT" in time and "before DCT" not in time and "after DCT" not in time :
                        record_date = record_date[0:-2] + (record_date[-2] + note_offset,record_date[-1]+note_offset)
                        answers.append(record_date)
                        #print(annotations)
                QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
        # Answers: every "mention" annotation that carries any time attribute.
        elif answertype == "indicators":
            for key in disease_test:
                Diseases = key
                logical_form_template = logical_form
                logical_form_template = logical_form_template.replace("|problem|", key.lower())
                answers = []
                question_paraphrases = []
                for question in question_list:
                    orginal = question
                    question = question.replace("|problem|", key.lower())
                    if (question, orginal) not in question_paraphrases:
                        question_paraphrases.append((question, orginal))
                        non_uniq.append(question)
                for (on_date_disease, note_offset) in zip(Disease_time_progression, offset_notes):
                    Diseases = key
                    inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                    time = []
                    for annotations in inidcators["mention"]:
                        time = inidcators["mention"][annotations][0]
                        if len(time) != 0:
                            annotations = annotations[0:-2] + (annotations[-2] + note_offset, annotations[-1] + note_offset)
                            answers.append(annotations)
                QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
        # Answers: CAD symptom annotations (symptoms are only tracked for CAD).
        elif answertype == "symptom":
            key = "CAD"
            logical_form_template = logical_form
            logical_form_template = logical_form_template.replace("|problem|", key)
            answers = []
            question_paraphrases = []
            for question in question_list:
                orginal = question
                question = question.replace("|problem|", key)
                if (question,orginal) not in question_paraphrases:
                    question_paraphrases.append((question,orginal))
                    non_uniq.append(question)
            for (on_date_disease, note_offset) in zip(Disease_time_progression, offset_notes):
                Diseases = key
                inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                time = []
                for annotations in inidcators["symptom"]:
                    time = inidcators["symptom"][annotations][0]
                    if len(time) != 0:
                        annotations = annotations[0:-2] + (annotations[-2] + note_offset, annotations[-1] + note_offset)
                        answers.append(annotations)
                        # print(annotations)
            QLA.append((question_paraphrases, logical_form_template, answers,non_uniq))
        # Answers: every medication-type annotation seen anywhere (no placeholders).
        elif answertype == "medications_all":
            for key in disease_test:
                logical_form_template = logical_form
                answers = []
                for (on_date_disease, note_offset) in zip(Disease_time_progression, offset_notes):
                    Diseases = key
                    inidcators = on_date_disease[Diseases] ## Get all corresponding indicators for that problem
                    time = []
                    for med_type in self.types:
                        try:
                            out = inidcators[med_type]
                        except:
                            # Not every disease dictionary carries every
                            # medication type.
                            continue
                        for annotations in out:
                            time = inidcators[med_type][annotations][0]
                            if len(time) != 0:
                                annotations = annotations[0:-2] + (annotations[-2] + note_offset, annotations[-1] + note_offset)
                                answers.append(annotations)
                                # print(annotations)
                print(question_list[0])
                QLA.append([[(question_list[0],question_list[0])], logical_form_template, answers,question_list])
        else:
            # Unknown answertype: log it and fall through with whatever QLA holds.
            print(answertype)
        return QLA
def InputMapping(self, types_to_replace, logicalform, question_list):
        """Return the value tuples used to fill a template's placeholder types.

        Fix: `types_to_replace` originates from list(set(...)), so its element
        order is nondeterministic; the original compared it against specific
        ordered lists and could randomly miss (e.g. ["date", "test"] vs
        ["test", "date"]).  Comparison is now done on sets.
        NOTE(review): the call site passes (types_to_replace, question_list,
        logical_form); the two trailing parameters are unused here, so the
        swapped names are harmless -- confirm before relying on them.
        """
        annotations = []
        wanted = set(types_to_replace)
        if wanted == set(["test"]):
            annotations = test_annotations
            return annotations
        elif wanted == set(["test", "date"]):
            annotations = []
            for test in test_annotations:
                # Synthesize a plausible random date (years 2000-2100).
                date = str(2000 + random.randint(0, 100)) + "-" + str(random.randint(1, 12)) + "-" + str(
                    random.randint(1, 28))
                annotations.append([test[0], date])
        elif wanted == set(["test", "time"]):
            annotations = []
            for test in test_annotations:
                time = random.choice(["years ", "weeks "]) + str(random.randint(2, 5))
                annotations.append([test[0], time])
        elif wanted == set(["test", "time", "value"]):
            annotations = []
            for test in test_annotations:
                time = random.choice(["years ", "weeks "]) + str(random.randint(2, 5))
                annotations.append([test[0], time, test_value[test[0]]])
        elif wanted == set(["medication"]) or wanted == set(["treatment"]):
            annotations = [[meds] for meds in self.list_medications]
        elif wanted == set(["problem"]):
            annotations = [[prob] for prob in problem_annotations]
        elif wanted == set(["test", "problem"]):
            annotations = []
            for problem in disease_test:
                for test in disease_test[problem]:
                    annotations.append([dictionary[test][1], problem])
        elif wanted == set(["time"]):
            time = random.choice(["years ", "weeks "]) + str(random.randint(2, 5))
            annotations.append([time])
        elif wanted == set(["none"]):
            pass
        else:
            # Unhandled placeholder combination: log it, return empty.
            print(types_to_replace)
        return annotations
###################################### Supporting Utility Functions #######################################
def checking_for_errors(self, question_list, logical_form_template):
        """Validate that all paraphrases and the logical form share one placeholder set.

        `question_list` is a "##"-separated string of paraphrases with |type|
        placeholders.  Returns [set_of_placeholder_types] when every paraphrase
        and the logical form use the same set; otherwise prints a diagnostic and
        returns [].  (Unused locals qtemplate/qwords_list removed.)
        """
        question_list = question_list.split("##")
        dup_rwords_list = []
        unique_templates = []
        for question in question_list:
            if question.strip() == "":
                continue
            if question in unique_templates:
                continue
            unique_templates.append(question)
            qwords = question.split("|")
            # Odd-indexed fragments of a |type| template are the placeholder names.
            dup_rwords = qwords[1::2]
            if len(dup_rwords_list) == 0:
                dup_rwords_list = [set(dup_rwords)]
            else:
                if set(dup_rwords) not in dup_rwords_list:
                    # |treatment| and |medication| are interchangeable; retry
                    # after normalizing before declaring a mismatch.
                    question = question.replace("|treatment|", "|medication|").strip()
                    qwords = question.split("|")
                    dup_rwords = qwords[1::2]
                    if set(dup_rwords) not in dup_rwords_list:
                        print("Error Out Of Context Question:")
                        print(question, logical_form_template, question_list)
                        return []
        lwords = logical_form_template.split("|")
        dup_lrwords = lwords[1::2]
        if set(dup_lrwords) not in dup_rwords_list:
            print("Error Out Of Context Question-Logical Form Pairs:")
            print(question_list, logical_form_template)
            return []
        if len(dup_rwords_list) != 1:
            print("Check Question_Logical Form Mapping")
            print(dup_rwords_list, question_list)
            print(logical_form_template)
            return []
        return dup_rwords_list
## viz function ##
def WriteTimeData(self):
        """Write a per-patient, per-variable timeline CSV (viz helper).

        For every patient and tracked variable, emits rows whose columns are
        (before date, on date, after date) evidence buckets derived from the
        "continuing" / "before DCT" / "during DCT" / "after DCT" time
        attributes gathered earlier.
        NOTE(review): only the "Diabetes" sub-dictionary is exported.
        Fixes vs. the original: zip(...) was indexed directly (Python-2-only),
        one empty-bucket fallback set out = [] and then read out[0]
        (IndexError), and the output file was never closed.
        """
        def add_unique(bucket, keys):
            # Append (text, position) unless an existing entry's text already
            # contains this text; if one does, still append when the position
            # is new.  This replaces six copies of the same inline pattern.
            if not bucket:
                bucket.append((keys[0], keys[1]))
                return
            texts, positions = zip(*bucket)
            if any(keys[0] in word for word in texts):
                if keys[1] not in positions:
                    bucket.append((keys[0], keys[1]))
            else:
                bucket.append((keys[0], keys[1]))
        OutputFile = "TimeSeriesRiskData.csv"
        ofile = open(OutputFile, "w")
        writer = csv.writer(ofile)
        for patient_id in self.Patients:
            for var in ["glucose", "A1C", "mention"] + self.Medications:
                timeline = [patient_id, var]
                heading = ["patient_id", "variable"]
                for idx in range(len(self.Patients[patient_id])):
                    # self.Patients[patient_id][idx] = (notes, record_date, event dict)
                    date = self.Patients[patient_id][idx][1]
                    heading.extend(["before " + date, date, "after " + date])
                    # values = [before-DCT, during-DCT, after-DCT] evidence buckets
                    values = [[], [], []]
                    event_dictionary = self.Patients[patient_id][idx][2]
                    for keys in event_dictionary["Diabetes"][var]:
                        statuses = event_dictionary["Diabetes"][var][keys]
                        if "continuing" in statuses:
                            # A continuing event is evidence for all three buckets.
                            add_unique(values[0], keys)
                            add_unique(values[1], keys)
                            add_unique(values[2], keys)
                        else:
                            if "after DCT" in statuses:
                                add_unique(values[2], keys)
                            if "before DCT" in statuses:
                                add_unique(values[0], keys)
                            if "during DCT" in statuses:
                                add_unique(values[1], keys)
                            if "not mentioned" in statuses:
                                print("not mentioned occurence")
                    timeline.extend(values)
                    # Header row is emitted with the first variable only.
                    if var == "glucose":
                        writer.writerow(heading)
                    writer.writerow(timeline)
            writer.writerow([""])
        ofile.close()
# Script entry point: running this module executes the full risk QA generation
# pipeline (RiskFileAnalysis is defined earlier in this file).
RiskFileAnalysis()
| 105,939
| 46.18931
| 185
|
py
|
emrQA
|
emrQA-master/generation/i2b2_medications/medication-answers.py
|
import csv
import os
from os import listdir
from os.path import isfile, join
import json
import random
import argparse
# Command-line configuration: locations of the i2b2 medications challenge data,
# the question templates, and the output directory.
parser = argparse.ArgumentParser()
parser.add_argument('--i2b2_dir', default='', help='Directory containing i2b2 medications challange files')
parser.add_argument('--templates_dir', default='', help='Directory containing template files in the given format')
parser.add_argument('--output_dir', default='', help='Directory to store the output')
args = parser.parse_args()
###################################################### SET FILE PATHS ##################################################################
## i2b2 file paths ##
# Both ground-truth annotation directories are consumed; notes live separately.
DosageFilePath = [ os.path.join(args.i2b2_dir,"annotations_ground_truth/converted.noduplicates.sorted/"), os.path.join(args.i2b2_dir,"training.ground.truth/")]
MedicationClinicalNotes = [os.path.join(args.i2b2_dir,"train.test.released.8.17.09/")]
## template file path ##
template_file_path = args.templates_dir
## output file paths ##
ql_output = os.path.join(args.output_dir,"medication-ql.csv")
medications_qa_output_json = os.path.join(args.output_dir,"medication-qa.json")
######################################################## CODE #########################################################################
class GenerateQA():
DosageFilePath = DosageFilePath
MedicationClinicalNotes = MedicationClinicalNotes
def __init__(self):
self.ReadMedicationData()
self.ReadTemplates()
######################### Read i2b2 file functions ###################################
def ReadMedicationData(self):
## based on format of the i2b2 files. please refer to the i2b2 medications challenge documentation for details ###
abbs = {"m": "medication", "do": "dosage", "mo": "mode", "f": "frequency", "du": "duration", "r": "problem",
"e": "event", "t": "temporal", "c": "certainty", "ln": "list"}
exception = ["list", "event", "temporal",
"certainty"] ## very few annotations are tagged with these, hence we willl ignore them.
self.MedicationData = []
ClinicalNotes = {}
## read the clinical notes ##
for paths in self.MedicationClinicalNotes:
files = [f for f in listdir(paths) if isfile(join(paths, f))]
for file in files:
remote_file = open(paths + file)
ClinicalNotes[file.strip()] = remote_file.readlines()
## read the annotations per clinical note (parse the files) ##
annotations_span = []
for paths in self.DosageFilePath:
files = [f for f in listdir(paths) if isfile(join(paths, f))]
for file in files:
remote_file = open(paths + file)
note_id = file.split(".")[0]
note_id = note_id.split("_")[0]
# print(file)
dictionary = {note_id: []}
PatientNote = ClinicalNotes[note_id] ## access the corresponding clinical note.
flag = 0
for line in remote_file:
med_list = {}
line = line.replace("|||", "||")
words = line.split("||")
for word in words:
term = word.split("=")
try:
type = abbs[term[0].strip()] ## check if all of them lie within the given annotation list
except:
print(paths + file)
flag = 1
break
full_annotation = "=".join(term[1:])
index = [pos for pos, char in enumerate(full_annotation) if char == "\""]
pos1 = int(index[0])
pos2 = int(index[-1])
annotation = full_annotation[pos1 + 1:pos2]
indxs = full_annotation[pos2 + 1:].split(",")
line_in_note = ""
start_line = None
if annotation == "nm" or type in exception:
med_list[type] = [annotation, line_in_note, start_line]
continue
# print(word,annotation,indxs)
# print(indxs)
for indx in indxs:
indx = indx.strip()
out = indx.split(" ")
start_line = out[0].split(":")[0]
start_token = out[0].split(":")[1]
end_line = out[1].split(":")[0]
end_token = out[1].split(":")[1]
line_in_note += "".join(PatientNote[int(start_line) - 1:int(end_line)])
# if int(end_line) > int(start_line):
# print(type)
# print(line)
# print(end_line,start_line)
## some end line number are greater than start line numbers. annotation line_in_note can span upto 3 lines
## annotation can be discontinous set of tokens
med_list[type] = [annotation, line_in_note, start_line, start_token]
# if start_line != end_line:
# print(int(end_line)-int(start_line))
# print(line_in_note)
dictionary[note_id].append(med_list)
remote_file.close()
if flag == 0:
if (dictionary, PatientNote) not in self.MedicationData:
self.MedicationData.append((dictionary, PatientNote))
# print(annotations_span)
######################## Main program functions ##########################################
def ReadTemplates(self):
self.medications_out = {"paragraphs": [], "title": "medication"}
self.logical_out = []
########################################## Set File Paths ##############################################
### File to write Question-Logical Forms ##
ofile = open(ql_output, "w")
self.filewriter_forlform = csv.writer(ofile, delimiter="\t")
self.filewriter_forlform.writerow(["Question", "Logical Form"])
### File to read templates ###
file = open(template_file_path)
filereader = list(csv.reader(file))
## read only templates relevant to medications challenge ##
med_lines = []
for line in filereader[1:]:
if line[0] != "medication" and line[0] != "medications":
continue
med_lines.append(line)
########################################## Main Function Call ##############################################
for (dictionary,PatientNote) in self.MedicationData:
for note_id in dictionary:
out_patient = {"note_id": note_id, "context": PatientNote, "qas": []}
med_list = dictionary[note_id] ## extract all the annotations given per note ##
## create one to many mappings, to use them for QA. Coreference not resolved ##
self.MakeMedicationRelationMappings(med_list)
flag = 0
self.unique_questions = []
question_id = 0
for line in med_lines:
## do +1 for the new format ##
question = line[2].strip()
logical_form = line[3].strip()
answertype = line[4].split(",")
answertype = [type.strip() for type in answertype]
#question = question.replace("|problem| or |problem|","|problem|")
question = question.replace("|medication| or |medication|", "|medication|")
question = question.replace("|problem| or |problem|", "|problem|")
question = question.replace("|test| or |test|", "|test|")
question = question.replace("|test| |test| |test|", "|test|")
question = question.replace("\t", "")
logical_form = logical_form.replace("\t", "")
if question.strip() == "":
continue
answer_out = self.MakeMedicationQLA(question,logical_form,answertype,med_list,flag,note_id,PatientNote,question_id)
if len(answer_out) != 0:
#for answer in answer_out:
#print(answer["id"])
out_patient["qas"].extend(answer_out)
self.medications_out["paragraphs"].append(out_patient)
################################################################# Dump JSON ###########################################
json_out = medications_qa_output_json
with open(json_out, 'w') as outfile:
json.dump(self.medications_out, outfile, ensure_ascii=False) ## storage format same as SQUAD
#json_out = medications_ql_output_json
#with open(json_out, 'w') as outfile:
# json.dump(self.logical_out, outfile, ensure_ascii=False) ## storage format, question logical_form question_id logicalfrom_id source
def MakeMedicationQLA(self, question_list, logical_form_template, answertype, med_list, flag, note_id, PatientNote, question_id):
answer_out = []
## save a copy of the orginals ##
intial_question_list = question_list.split("##")
intial_template = logical_form_template
orginal_logical_form_template = logical_form_template.strip()
## check for errors in templates and gather all the placeholders in the templates (placeholders stored in rwords) ##
## semantic types of placeholders ##
dup_rwords_list = self.CheckForErrors(intial_question_list, orginal_logical_form_template)
if dup_rwords_list == None:
return answer_out
for med_annotations in med_list: ## Medlist is a list of dictionaries (each dict is a medication and its attributes)
flag = 0
logical_form_template = orginal_logical_form_template
if len(dup_rwords_list) != 1: ## sanity check
print("Check Question_Logical Form Mapping")
print(dup_rwords_list, intial_question_list)
print(logical_form_template)
return answer_out
else:
dup_rwords = dup_rwords_list[0]
rwords = list(dup_rwords)
line_num = []
line_token = []
question_line = []
quest_list_nar = []
answer = []
### checking if placeholder values to be used in question is "nm" (not mentioned), if yes set flag to 1 ##
if rwords != ["time"]:
for idx in range(len(rwords)):
if rwords[idx] == "treatment":
rwords[idx] = "medication"
if med_annotations[rwords[idx]][0] == "nm":
flag = 1
break
else:
line_num.append(int(med_annotations[rwords[idx]][2]))
line_token.append(int(med_annotations[rwords[idx]][3]))
question_line.append(med_annotations[rwords[idx]][1])
rwords[idx] = med_annotations[rwords[idx]][0]
quest_list_nar.append(med_annotations["list"][0])
## Generate question, logical form and answer only if flag is 0 ##
if flag == 0:
[paraphrase_questions, tuple_orginal, logical_form] = self.MakeMedicationQL(rwords,
intial_question_list,
logical_form_template,
dup_rwords)
[answer, answer_line, result_num, result_token, list_nar] = self.MakeAnswer(quest_list_nar, answertype,
med_annotations,
question_line, line_num,
line_token)
else:
continue
# return answer_out #### bug fixed ##
if len(answer) != 0:
if answertype == ["medication", 'dosage']:
entity_type = "complex"
elif answertype == ["yes"]:
entity_type = "empty"
else:
entity_type = "single"
unique_paras = set(paraphrase_questions)
if unique_paras not in self.unique_questions: ## redundancy check: checking if these set of questions are unique for every clinical note ##
self.unique_questions.append(unique_paras)
question_id += 1
ans_list = []
for idx in range(len(answer)):
start_line = result_num[idx]
start_token = result_token[idx]
val = {"answer_start": [start_line, start_token], "text": answer[idx],
"evidence": answer_line[idx], "evidence_start": result_num[idx], "answer_entity_type": entity_type}
if val not in ans_list:
ans_list.append(val)
## ""evidence"" in the dictionary above is currently just the answer line in the note. You can also consider question line and answer line from note as evidence in that uncomment below code and use it accordingly #
'''
## maximum distance between the question line and answer line ##
perms = list(itertools.product(result_num+line_num, result_num+line_num))
diffs = [abs(val1 - val2) for (val1, val2) in perms]
difference = max(diffs)
Note_val = "#".join(answer_line)
list_nar = ",".join(list_nar)
## evidence per answer ##
evidence_answer = []
evidence_start = []
evidence_temp_line = answer_line
evidence_temp_start = result_num
for pdx in range(len(evidence_temp_line)):
if evidence_temp_line[pdx] not in evidence_answer:
evidence_answer.append(evidence_temp_line[pdx])
evidence_start.append(evidence_temp_start[pdx])
val = {"answer_start": [start_line, start_token], "text": answer[idx],
"evidence": evidence_answer,
"evidence_start": evidence_start}
if qa_csv_write:
self.filewriter.writerow(
["##".join(list(unique_paras))] + [logical_form] + [",".join(set(answer))] + [Note_val] + [note_id + "_MedicationsChallenge"] + [difference] + [list_nar])
'''
answer_temp = {"answers": ans_list, "id": [tuple_orginal, intial_template],
"question": list(unique_paras)}
answer_out.append(answer_temp)
return answer_out
######################## Main Utility Functions ######################################
def MakeMedicationRelationMappings(self,med_list):
self.map_meds_to_reasons = {}
self.map_meds_to_dosages = {}
self.map_meds_to_frequency = {}
self.map_reasons_to_meds = {}
self.map_meds_to_durations = {}
self.medications_all = {}
for med_annotations in med_list:
if med_annotations["medication"][0] not in self.medications_all:
self.medications_all[med_annotations["medication"][0]] = [med_annotations["medication"]]
#print(med_annotations["medication"])
if med_annotations["medication"][0] not in self.map_meds_to_dosages:
self.map_meds_to_dosages[med_annotations["medication"][0]] = []
if med_annotations["medication"][0] not in self.map_meds_to_frequency:
self.map_meds_to_frequency[med_annotations["medication"][0]] = []
if med_annotations["medication"][0] not in self.map_meds_to_reasons:
self.map_meds_to_reasons[med_annotations["medication"][0]] = []
if med_annotations["problem"][0] != "nm":
if med_annotations["problem"][0] not in self.map_reasons_to_meds:
self.map_reasons_to_meds[med_annotations["problem"][0]] = []
if med_annotations["medication"][0] not in self.map_meds_to_durations:
self.map_meds_to_durations[med_annotations["medication"][0]] = []
if med_annotations["dosage"][0] != "nm":
#if med_annotations["event"] == ""
if med_annotations["dosage"]+med_annotations["list"] not in self.map_meds_to_dosages[med_annotations["medication"][0]]:
self.map_meds_to_dosages[med_annotations["medication"][0]].append(med_annotations["dosage"]+med_annotations["list"])
if med_annotations["problem"][0] != "nm":
self.map_meds_to_reasons[med_annotations["medication"][0]].append(med_annotations["problem"]+med_annotations["list"])
if med_annotations["problem"][0] != "nm":
self.map_reasons_to_meds[med_annotations["problem"][0]].append(med_annotations["medication"]+med_annotations["list"])
if med_annotations["frequency"][0] != "nm":
self.map_meds_to_frequency[med_annotations["medication"][0]].append(med_annotations["frequency"]+med_annotations["list"])
if med_annotations["duration"][0] != "nm":
self.map_meds_to_durations[med_annotations["medication"][0]].append(med_annotations["duration"]+med_annotations["list"])
def MakeMedicationQL(self, rwords, question_list, logical_form_template, dup_rwords):
intial_template = logical_form_template
paraphrase_questions = []
tuple_orginal = []
if rwords == ["time"]:
time = str(random.randint(2, 5)) + random.choice([" years", " weeks"])
for question in question_list:
original = question
question = question.replace("|time|", time)
logical_form_template = logical_form_template.replace("|time|", time)
rwords = []
dup_rwords = []
paraphrase_questions.append(question)
tuple_orginal.append((question, original))
else:
############################ make questions ############################################
for question in question_list:
orginal = question
idx = 0
done = []
for types in list(dup_rwords):
# temp = qwords
index = question.find("|" + types + "|")
if index == -1 and types not in done:
print(question, "|" + types + "|", done)
question = question.replace("|" + types + "|", rwords[idx])
done.append(types)
idx += 1
tuple_orginal.append((question, orginal))
paraphrase_questions.append(question)
###################################### Make Logical Form #################################
## tab ##
idx = 0
done = []
for types in list(dup_rwords):
logical_form_template.replace("|treatment|", "|medication")
index = logical_form_template.find("|" + types + "|")
if index == -1 and types not in done:
print(logical_form_template, "|" + types + "|", done, types)
done.append(types)
logical_form_template = logical_form_template.replace("|" + types + "|", rwords[idx])
idx += 1
logical_form = logical_form_template
### Writing question-logical form ##
for (question, orginal) in tuple_orginal:
self.filewriter_forlform.writerow([question] + [logical_form.strip()] + [orginal.strip()] + [intial_template])
return [paraphrase_questions, tuple_orginal, logical_form]
def MakeAnswer(self, quest_list_nar, answertype, med_annotations, question_list,line_num,line_token):
result_num = []
result_token = []
answer_line = []
list_nar = quest_list_nar
answer = []
idx = 0
if answertype[idx] == "yes":
### the question line is evidence for yes or no questions ##
#answer = ["yes"]*len(question_list)
answer = [""] * len(question_list)
answer_line.extend(question_list)
result_num.extend(line_num)
#result_token.extend(line_token)
result_token = [""] * len(question_list)
list_nar.extend(quest_list_nar)
elif answertype == ["problem"]:
for listr in self.map_meds_to_reasons[med_annotations["medication"][0]]:
answer += [listr[0]]
answer_line.append(listr[1])
result_num.append(int(listr[2]))
result_token.append(int(listr[3]))
list_nar.append(listr[3])
elif answertype == ["frequency"]:
# print("frequency")
for listr in self.map_meds_to_frequency[med_annotations["medication"][0]]:
answer += [listr[0]]
answer_line.append(listr[1])
result_num.append(int(listr[2]))
result_token.append(int(listr[3]))
list_nar.append(listr[3])
elif answertype == ["dosage"]:
for med in [med_annotations["medication"][0]]:
for listr in self.map_meds_to_dosages[med]:
answer += [listr[0]]
answer_line.append(listr[1])
result_num.append(int(listr[2]))
result_token.append(int(listr[3]))
list_nar.append(listr[3])
elif answertype == ["medication"]:
for listr in self.map_reasons_to_meds[med_annotations["problem"][0]]:
answer += [listr[0]]
answer_line.append(listr[1])
result_num.append(int(listr[2]))
result_token.append(int(listr[3]))
list_nar.append(listr[3])
elif answertype == ["medication", 'dosage']:
meds = self.map_reasons_to_meds[med_annotations["problem"][0]]
for med in meds:
#dos = ",".join([x[0] for x in self.map_meds_to_dosages[med[0]]])
#answer += ["( " + med[0] + ", " + dos + ")"]
answer.append([med[0]])
answer_line.append([med[1]])
result_num.append([int(med[2])])
result_token.append([int(med[3])])
list_nar.append([med[3]])
for x in self.map_meds_to_dosages[med[0]]:
#if x[1] not in answer_line[-1]:
answer[-1].extend([x[0]])
answer_line[-1].extend([x[1]])
result_num[-1].extend([int(x[2])])
result_token[-1].extend([int(x[3])])
list_nar[-1].extend([x[4]])
#print("new medicine")
#print(answer[-1])
#print(result_num[-1])
#print(result_token[-1])
#print(answer_line[-1])
#result_num[-1].extend([int(x[2]) for x in self.map_meds_to_dosages[med[0]] if int(x[2]) not in result_num[-1]])
#result_token[-1].extend([int(x[3]) for x in self.map_meds_to_dosages[med[0]]])
#list_nar.extend([x[3] for x in self.map_meds_to_dosages[med[0]]])
elif answertype == ["duration"]:
for listr in self.map_meds_to_durations[med_annotations["medication"][0]]:
answer += [listr[0]]
answer_line.append(listr[1])
result_num.append(int(listr[2]))
result_token.append(int(listr[3]))
list_nar.append(listr[3])
elif answertype == ["medications_all"]:
for medication_name in self.medications_all:
listr = self.medications_all[medication_name][0]
answer += [listr[0]]
answer_line.append(listr[1])
result_num.append(int(listr[2]))
result_token.append(int(listr[3]))
list_nar.append(listr[3])
elif answertype == ["none"]:
pass
else:
print(answertype)
answer = []
return [answer,answer_line, result_num, result_token, list_nar]
######################## Supporting Utility Functions ######################################
def CheckForErrors(self, question_list, logical_form_template):
## gather all the placeholders in the templates ##
dup_rwords_list = []
unique_templates = []
qwords_list = []
## check if all the questions paraphrases have the same placeholders ##
for question in question_list:
if question.strip() == "":
continue
question = question.replace("|medication| or |medication|", "|medication|")
question = question.replace("|problem| or |problem|", "|problem|")
question = question.replace("|test| or |test|", "|test|")
question = question.replace("|test| |test| |test|", "|test|")
question = question.strip()
if question not in unique_templates:
unique_templates.append(question)
else:
continue
qwords = question.split("|")
dup_rwords = qwords[1:len(qwords):2]
qwords_list.append(qwords)
if len(dup_rwords_list) == 0:
dup_rwords_list = [set(dup_rwords)]
else:
if set(dup_rwords) not in dup_rwords_list:
print("Error Out Of Context Question:")
print(question, logical_form_template, question_list)
return None
## Check if the placeholders in logical forms are same as the placeholders in question ##
lwords = logical_form_template.split("|")
dup_lrwords = lwords[1:len(lwords):2]
if set(dup_lrwords) not in dup_rwords_list:
print("Error Out Of Context Question-Logical Form Pairs:")
print(question_list, logical_form_template)
return None
return dup_rwords_list
if __name__=="__main__":
GenerateQA()
| 27,595
| 43.509677
| 238
|
py
|
emrQA
|
emrQA-master/generation/combine_data/combine_answers.py
|
import json
import csv
import random
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', default='/home/anusri/Desktop/emrQA/output/', help='Directory of output files')
args = parser.parse_args()
###################################################### SET FILE PATHS ##################################################################
medications = json.load(open(os.path.join(args.output_dir,"medication-qa.json")))
relations = json.load(open(os.path.join(args.output_dir,"relations-qa.json")), encoding="latin-1")
risk = json.load(open(os.path.join(args.output_dir,"risk-qa.json")))
smoking = json.load(open(os.path.join(args.output_dir,"smoking-qa.json")))
obesity = json.load(open(os.path.join(args.output_dir,"obesity-qa.json")))
######################################################## CODE #########################################################################
data = [medications, relations, risk, smoking, obesity]
#data = [relations]
data_out = {"data": data}
json_out = os.path.join(args.output_dir,"data.json")
with open(json_out, 'w') as outfile:
json.dump(data_out, outfile, encoding="latin-1")
total_clinical_notes = 0
all_questions = []
all_clinical_notes = []
for dataset in data:
for note in dataset["paragraphs"]:
total_clinical_notes += 1
if " ".join(note["context"]) not in all_clinical_notes:
all_clinical_notes.extend([" ".join(note["context"])])
else:
#print("repeat")
continue
for questions in note["qas"]:
#print(questions["question"])
all_questions.append(list(set(questions["question"]))) # all questions
out = []
count = {}
print("Total Clinical Notes", len(all_clinical_notes))
total_question = len(all_questions)
totals = 0
questions_list = []
for value in all_questions:
#print(value)
if type(value) != list:
print("error")
if len(value[0]) == 1:
print(value)
#out.append([len(value[0]),len(value),"\t".join(value)])
#if len(value) not in count:
# count[len(value)] = []
totals += len(value)
questions_list.extend(value)
'''
print(len(count))
new_list = sorted(out, key=lambda x: x[1], reverse=True)
ofile = open("testing","w")
for val in new_list:
ofile.write("\t".join(map(str,val)))
ofile.write("\n")
ofile.close()
'''
## Average Question Length ##
print("Total Number Of Questions", totals)
print("Total number of question types", total_question)
##################################################################################################################################
medications = os.path.join(args.output_dir,"medication-ql.csv")
relations = os.path.join(args.output_dir,"relations-ql.csv")
risk = os.path.join(args.output_dir,"risk-ql.csv")
smoking = os.path.join(args.output_dir,"smoking-ql.csv")
obesity = os.path.join(args.output_dir,"obesity-ql.csv")
data = [medications, relations, risk, smoking, obesity]
unique = set()
for file_path in data:
file = open(file_path)
filereader = list(csv.reader(file))
for line in filereader[1:]:
unique.add(tuple(line))
#if random.randint(1,100) < 10:
#print(line)
values = list(unique)
print("Total number of QL forms", len(values))
final_out = os.path.join(args.output_dir,"data-ql.csv")
ofile = open(final_out, "w")
writer = csv.writer(ofile, delimiter="\t")
writer.writerow(["Question", "Logical Form", "QTemplate", "LTemplate"])
for val in values:
writer.writerow(val)
ofile.close()
'''
datasets = json.load(open("data.json"))
for dataset in datasets:
print(dataset["title"])
for ClinicalNote in dataset["paragraphs"]:
NoteText = "\n".join(ClinicalNote["context"])
for questions in ClinicalNote["qas"]:
paraphrase_questions = questions["question"]
print(paraphrase_questions)
for answer in questions["answers"]:
answer_text = answer["text"]
answer_start = answer["answer_start"] ## [start_line,start_token] from NoteText
evidence = answer["evidence"] ## The evidence here is question line + answer line (the evidence we use as ground truth is start_line from answer_start)
print(answer_text,answer_start,evidence)
'''
'''
use_evidence_model = "True"
paras = []
idx = 0
for note in medications["paragraphs"]:
if medications["title"] == "risk-dataset":
text = "\n".join(note["context"])
para = {"context": text, "qas": []}
for questions in note["qas"]:
idx += 1 ## Take care of this
question = {"question": questions["question"], "answers": [], "id": idx}
if use_evidence_model == "True":
for answer in questions["answers"]:
question["answers"].append({"text": answer["evidence"], "answer_start": answer["answer_start"][0]}) ## the answer line
else:
for answer in questions["answers"]:
question["answers"].append({"text": answer["text"], "answer_start": answer["answer_start"][1]}) ## the answer text
else:
text = "".join(note["context"])
line_lenth = [len(line) for line in note["context"]]
para = {"context": text, "qas": []}
for questions in note["qas"]:
idx += 1
print(questions["id"])
question = {"question": questions["question"], "answers": [], "id": idx}
for answer in questions["answers"]:
if use_evidence_model == "True":
try: ## evidence and evidence start token
question["answers"].append({"text":note["context"][answer["answer_start"][0]-1],"answer_start":sum(line_lenth[answer[:answer["answer_start"][0]-1]])})
except:
unique = []
for num in list(map(lambda x: x - 1, answer["evidence_start"])):
if num not in unique:
unique.append(num)
question["answers"].append({"text":note["context"][num],"answer_start":sum(line_lenth[:num])})
else:
try: ## answer and answer start token
question["answers"].append({"text": answer["text"],
"answer_start": sum(
line_lenth[answer[:answer["answer_start"][0] - 1]])+answer["answer_start"][1]})
except:
unique = []
for num in list(map(lambda x: x - 1, answer["evidence_start"])):
if num not in unique:
unique.append(num)
question["answers"].append(
{"text": note["context"][num], "answer_start": sum(line_lenth[:num])})
para["qas"].append(question)
paras.append(para)
medications_new = {"paragraphs": paras, "title": "medications"}
#file = open("file.json", "w")
data = {}
data["data"] = [medications_new]
output = {'qids': [], 'questions': [], 'answers': [],
'contexts': [], 'qid2cid': []}
for article in data["data"]:
for paragraph in article['paragraphs']:
output['contexts'].append(paragraph['context'])
for qa in paragraph['qas']:
output['qids'].append(qa['id'])
#print(qa["question"])
output['questions'].append(qa['question'])
output['qid2cid'].append(len(output['contexts']) - 1)
if 'answers' in qa:
output['answers'].append(qa['answers'])
#print(qa['answers'])
json_out = "data_squad_format.json"
with open(json_out, 'w') as outfile:
json.dump(data, outfile, encoding="utf-8")
'''
| 7,952
| 34.346667
| 174
|
py
|
emrQA
|
emrQA-master/generation/i2b2_smoking/smoking-answers.py
|
import xmltodict
import csv
import json
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--i2b2_dir', default='', help='Directory containing i2b2 smoking challange files')
parser.add_argument('--templates_dir', default='', help='Directory containing template files in the given format')
parser.add_argument('--output_dir', default='', help='Directory to store the output')
args = parser.parse_args()
###################################################### SET FILE PATHS ##################################################################
templates_file = args.templates_dir
i2b2_file_paths = args.i2b2_dir
ql_output = os.path.join(args.output_dir,"smoking-ql.csv")
qa_output = os.path.join(args.output_dir,"smoking-qa.json")
file_names = ["smokers_surrogate_test_all_groundtruth_version2.xml","smokers_surrogate_train_all_version2.xml"]
######################################################## CODE #########################################################################
def ReadFile():
file_path = i2b2_file_paths
status = []
for file_name in file_names:
file = file_path + file_name
with open(file) as fd:
XML = xmltodict.parse(fd.read())
idx = 0
for key in XML["ROOT"]["RECORD"]:
idx += 1
patient_id = key["@ID"]
answer_class = key["SMOKING"]["@STATUS"]
patient_note = key["TEXT"]
status.append([patient_id,answer_class,patient_note])
return status
def MakeJSONOutput(smoking_data, json_out, status, filewriter_forlform):
smoking_out = {"paragraphs": [], "title": "smoking"}
for state in status:
patient_id = state[0]
patient_note = state[2]
out = {"note_id": patient_id, "context": patient_note, "qas": []}
for row in smoking_data:
question = row[2].strip()
form = row[3].strip()
answer_type = row[4]
if question == "":
continue
question_list = question.split("##")
for q in question_list:
filewriter_forlform.writerow([q, form, q, form])
if answer_type == "smoke_class":
out["qas"].append({"answers": [{"answer_start": "", "text": state[1], "evidence": "", "evidence_start": ""}],
"id": [zip(question_list, question_list), form], "question": question_list})
smoking_out["paragraphs"].append(out)
with open(json_out, 'w') as outfile:
json.dump(smoking_out, outfile)
if __name__=="__main__":
### Read i2b2 files, one status per clinical note ###
status = ReadFile()
### File to read templates ###
filereader = list(csv.reader(open(templates_file)))
## read only templates relevant to smoking challenge ##
smoking_lines = []
for line in filereader[1:]:
if line[0] != "smoking" and line[0] != "smoking":
continue
smoking_lines.append(line)
ofile = open(ql_output, "w")
filewriter_forlform = csv.writer(ofile, delimiter="\t")
filewriter_forlform.writerow(["Question", "Logical Form"])
MakeJSONOutput(smoking_lines, qa_output, status, filewriter_forlform)
#MakeQuestion(smoking_lines,out_file,status)
'''
def MakeQuestion(smoking_data,out_file,status):
ofile = open(out_file,"w")
ofilewriter = csv.writer(ofile)
values = ["Question", "Answer" , "Answer line in note", "Note ID", "Difference in QA lines"]
ofilewriter.writerow(values)
for row in smoking_data:
#print(row)
question = row[1].strip()
#print(row)
answer_type = row[3]
if answer_type == "smoke_class":
for state in status:
values = [question, state[1],"",state[0],""]
patient_id = status[0]
patient_note = status[2]
ofilewriter.writerow(values)
elif answer_type == "None":
#return []
pass
else:
print(answer_type)
'''
| 4,073
| 29.631579
| 136
|
py
|
emrQA
|
emrQA-master/generation/i2b2_obesity/obesity-answers.py
|
import xmltodict
import csv
import json
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--i2b2_dir', default='', help='Directory containing i2b2 obesity challange files')
parser.add_argument('--templates_dir', default='', help='Directory containing template files in the given format')
parser.add_argument('--output_dir', default='', help='Directory to store the output')
args = parser.parse_args()
###################################################### SET FILE PATHS ##################################################################
templates_file = args.templates_dir
obesity_file_path = i2b2_file_paths = args.i2b2_dir
file_names = ["obesity_standoff_annotations_test.xml","obesity_standoff_annotations_training.xml"]
note_names = ["obesity_patient_records_test.xml", "obesity_patient_records_training.xml"]
ql_output = os.path.join(args.output_dir,"obesity-ql.csv")
#print(ql_output)
qa_json_out = os.path.join(args.output_dir,"obesity-qa.json")
######################################################## CODE #########################################################################
def ReadFile():
file_path = obesity_file_path
Patient = {} #note_id is the key with a dictionary as value
for note_name in note_names:
file = file_path + note_name
with open(file) as fd:
XML = xmltodict.parse(fd.read())
for doc in XML["root"]["docs"]["doc"]:
doc_id = doc["@id"]
note_text = doc["text"]
if doc_id not in Patient:
Patient[doc_id] = {}
Patient[doc_id]["text"] = note_text
for file_name in file_names:
file = file_path + file_name
with open(file) as fd:
XML = xmltodict.parse(fd.read())
intuitive = XML["diseaseset"]["diseases"][0]["disease"]
textual = XML["diseaseset"]["diseases"][1]["disease"]
#print(intuitive)
for idx in range(len(intuitive)):
disease_name = intuitive[idx]["@name"]
intuitive_docs_list = intuitive[idx]["doc"]
for pidx in range(len(intuitive_docs_list)):
idoc_id = intuitive_docs_list[pidx]["@id"]
ijudgment = intuitive_docs_list[pidx]["@judgment"]
if idoc_id not in Patient:
Patient[idoc_id] = {}
if disease_name not in Patient[idoc_id]:
Patient[idoc_id][disease_name] = ijudgment
for idx in range(len(textual)):
disease_name = textual[idx]["@name"]
textual_docs_list = textual[idx]["doc"]
for pidx in range(len(textual_docs_list)):
tdoc_id = textual_docs_list[pidx]["@id"]
tjudgment = textual_docs_list[pidx]["@judgment"]
try:
ijudgment = Patient[tdoc_id][disease_name]
if ijudgment != tjudgment and tjudgment != "U" and tjudgment != "Q":
print(ijudgment, tjudgment, disease_name, tdoc_id)
except:
try:
Patient[tdoc_id][disease_name] = tjudgment
except:
Patient[tdoc_id] = {disease_name:tjudgment}
continue
return Patient
def MakeJSONOut(obesity_data,json_out,Patient):
    """Build a SQuAD-style JSON QA dataset from templates and patient judgments.

    Args:
        obesity_data: template rows; row[2]=question template, row[3]=logical
            form, row[4]=answer type ("problems" or "yes/no").
        json_out: output JSON file path.
        Patient: dict from ReadFile(): note_id -> {"text": ..., disease: judgment}.

    Side effects: appends rows to the module-level `filewriter_forlform` CSV
    writer and writes the assembled dataset to `json_out`.
    """
    obesity_out = {"paragraphs": [], "title": "obesity"}
    for note_id in Patient:
        # Bucket this patient's diseases by judgment code.
        Y_class = []
        U_class = []
        Q_class = []
        N_class = []
        patient_note = Patient[note_id]["text"]
        out = {"note_id": note_id, "context": patient_note, "qas": []}
        unique_questions = []
        for problem in Patient[note_id]:
            if problem == "text":
                # Skip the note-text entry; everything else is a disease name.
                continue
            if Patient[note_id][problem] == "Y":
                Y_class.append(problem)
            elif Patient[note_id][problem] == "N":
                N_class.append(problem)
            elif Patient[note_id][problem] == "U":
                U_class.append(problem)
            elif Patient[note_id][problem] == "Q":
                Q_class.append(problem)
            else:
                # Unexpected judgment code — surface it.
                print(Patient[note_id][problem])
        ###### not doing on all questions #####
        for row in obesity_data:
            question = row[2].strip()
            if question == "":
                continue
            lform = row[3]
            answer_type = row[4]
            question = question.replace("\t", "")
            lform = lform.replace("\t", "")
            orginal = question  # sic: template kept so each problem substitution starts fresh
            if answer_type == "problems":
                # "problems"-type: the answer is the list of other positive diseases.
                for idx in range(len(Y_class)):
                    problem = Y_class[idx]
                    question = orginal
                    if problem == "Obesity":
                        # Only the "Obesity" substitution produces a QA entry;
                        # the question is rebuilt by splicing the problem into slot 1.
                        qwords = question.split("|")
                        qwords[1] = problem
                        lform_new = lform.replace("|problem|",problem)
                        qwords = [word.strip() for word in qwords]
                        final_question = " ".join(qwords)
                        # Answer: every positive disease except the one asked about.
                        Answer = Y_class[0:idx] + Y_class[idx + 1:]
                    else:
                        # Other diseases only contribute (question, lform) CSV rows.
                        question = orginal.replace("|problem|", problem)
                        lform_new = lform.replace("|problem|", problem)
                        filewriter_forlform.writerow([question] + [lform_new] + [question] + [lform])
                        continue
                    ans_list = []
                    for ans in Answer:
                        ans_list.append({"answer_start": "", "text": ans, "evidence": "", "evidence_start": ""})
                    #print(final_question)
                    answer = {"answers": ans_list, "id": [[final_question,final_question],lform], "question": [final_question]}
                    out["qas"].append(answer)
                    filewriter_forlform.writerow([question] + [lform_new] + [question] + [lform])
            elif answer_type == "yes/no" and "|problem|" in question:
                # yes/no-type: answer is determined by which judgment bucket the disease is in.
                answers = ["yes", "no", "UNK"]
                jdx = -1
                question_template = question.split("##")  # "##" separates paraphrases
                #print(question)
                for temp in [Y_class, N_class, U_class]:
                    jdx += 1  # index into `answers`, parallel to the bucket order above
                    for problem in temp:
                        #if problem.lower() != "obesity":
                        #    continue
                        orginal_lform = lform
                        question_lits = question.replace("|problem|",problem).split("##")
                        lform_new = lform.replace("|problem|", problem)
                        #print(question_lits)
                        idx = 0
                        # Write each paraphrase to the CSV only the first time it is seen.
                        if question_lits not in unique_questions:
                            unique_questions.append(question_lits)
                            for q in question_lits:
                                filewriter_forlform.writerow([q] + [lform_new] + [question_template[idx]] + [orginal_lform])
                                idx += 1
                        Answer = [answers[jdx]]
                        ans_list = []
                        for ans in Answer:
                            ans_list.append({"answer_start": "", "text": ans, "evidence": "", "evidence_start": ""})
                        # NOTE(review): under Python 3, zip(...) here is a zip object, which
                        # json.dump cannot serialize — confirm this script targets Python 2
                        # or wrap the zip in list(...).
                        answer = {"answers": ans_list, "id": [zip(question_lits,question_template),orginal_lform], "question": question_lits}
                        out["qas"].append(answer)
            else:
                # Unhandled template type — surface it.
                print(answer_type)
        obesity_out["paragraphs"].append(out)
    with open(json_out, 'w') as outfile:
        json.dump(obesity_out, outfile)
if __name__=="__main__":
    # Question/logical-form CSV; MakeJSONOut() writes to this module-level
    # writer, so it must exist before the call below.
    ofile = open(ql_output, "w")
    filewriter_forlform = csv.writer(ofile, delimiter="\t")
    filewriter_forlform.writerow(["Question", "Logical Form"])
    ### Read i2b2 files ###
    Patient = ReadFile()
    ### File to read templates ###
    qfile = open(templates_file)
    read_data = list(csv.reader(qfile))
    ## read only templates relevant to obesity challenge ##
    obesity_data = []
    for line in read_data[1:]:  # skip the header row
        if line[0] != "obesity":
            continue
        obesity_data.append(line)
    MakeJSONOut(obesity_data,qa_json_out,Patient)
    # NOTE(review): ofile and qfile are never explicitly closed; buffered CSV
    # rows are only flushed at interpreter exit.
    #MakeQuestion(questions_file,out_file,Patient)
'''
def MakeQuestion(questions_file,out_file,Patient):
qfile = open(questions_file)
read_data = list(csv.reader(qfile, delimiter="\t"))
ofile = open(out_file, "w")
ofilewriter = csv.writer(ofile)
values = ["Question", "Answer", "Answer line in note", "Note ID", "Difference in QA lines"]
ofilewriter.writerow(values)
for note_id in Patient:
Y_class = []
U_class = []
Q_class = []
N_class = []
for problem in Patient[note_id]:
if Patient[note_id][problem] == "Y":
Y_class.append(problem)
elif Patient[note_id][problem] == "N":
N_class.append(problem)
elif Patient[note_id][problem] == "U":
U_class.append(problem)
elif Patient[note_id][problem] == "Q":
Q_class.append(problem)
else:
print(Patient[note_id][problem])
for row in read_data[1:4]:
question = row[1].strip()
if question == "":
continue
#print(row)
answer_type = row[3]
question_in = row[0] #question_concept_type
if answer_type == "problems":
for idx in range(len(Y_class)):
problem = Y_class[idx]
qwords = question.split("|")
qwords[1] = problem
qwords = [word.strip() for word in qwords]
final_question = " ".join(qwords)
Answer = Y_class[0:idx]+Y_class[idx+1:]
ofilewriter.writerow([final_question," ".join(Answer), "", note_id, ""])
elif answer_type == "yes/no" and question_in == "problem":
answers = ["yes","no",""]
jdx = -1
for temp in [Y_class,N_class,U_class]:
jdx += 1
for idx in range(len(temp)):
problem = temp[idx]
qwords = question.split("|")
qwords[1] = problem
qwords = [word.strip() for word in qwords]
final_question = " ".join(qwords)
Answer = answers[jdx]
ofilewriter.writerow([final_question,Answer, "", note_id, ""])
elif answer_type == "yes/no" and question_in == "None":
try:
if Patient[note_id]["Obesity"] == "Y":
ofilewriter.writerow([question, "yes", "", note_id, ""])
if Patient[note_id]["Obesity"] == "N":
ofilewriter.writerow([question, "no", "", note_id, ""])
if Patient[note_id]["Obesity"] == "U":
ofilewriter.writerow([question, "", "", note_id, ""])
except:
print(Patient[note_id].keys())
else:
print(answer_type,question_in)
'''
| 11,449
| 36.540984
| 141
|
py
|
3DTrans
|
3DTrans-master/setup.py
|
import os
import subprocess
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
def get_git_commit_number():
    """Return the abbreviated (7-character) git HEAD commit hash.

    Falls back to the placeholder '0000000' when the current working
    directory is not a git checkout (no `.git` entry present).
    """
    if not os.path.exists('.git'):
        return '0000000'
    result = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    return result.stdout.decode('utf-8')[:7]
def make_cuda_ext(name, module, sources):
    """Create a CUDAExtension whose import path is ``<module>.<name>``.

    The entries of `sources` are given relative to the package directory
    implied by the dotted `module` path and are resolved to file paths here.
    """
    module_dir = os.path.join(*module.split('.'))
    full_name = '%s.%s' % (module, name)
    resolved_sources = [os.path.join(module_dir, src) for src in sources]
    return CUDAExtension(name=full_name, sources=resolved_sources)
def write_version_to_file(version, target_file):
    """Write a one-line ``__version__ = "<version>"`` assignment to `target_file`.

    Overwrites any existing content of the file.
    """
    line = '__version__ = "{}"\n'.format(version)
    with open(target_file, 'w') as f:
        f.write(line)
if __name__ == '__main__':
    # Version = fixed release number + short git commit hash; persisted into the
    # package so the installed library can report which commit built it.
    version = '0.5.2+%s' % get_git_commit_number()
    write_version_to_file(version, 'pcdet/version.py')
    setup(
        name='pcdet',
        version=version,
        description='3DTrans Autonomous Driving Transfer Learning Codebase',
        install_requires=[
            'numpy',
            'llvmlite',
            'numba',
            'tensorboardX',
            'easydict',
            'pyyaml',
            'scikit-image',
            'tqdm',
            'SharedArray',
            # 'spconv', # spconv has different names depending on the cuda version
        ],
        author='3DTrans Development Team',
        author_email='bo.zhangzx@gmail.com',
        license='Apache License 2.0',
        packages=find_packages(exclude=['tools', 'data', 'output']),
        cmdclass={
            # BuildExtension compiles the CUDA extension modules listed below.
            'build_ext': BuildExtension,
        },
        ext_modules=[
            # 3D IoU computation and non-maximum suppression kernels.
            make_cuda_ext(
                name='iou3d_nms_cuda',
                module='pcdet.ops.iou3d_nms',
                sources=[
                    'src/iou3d_cpu.cpp',
                    'src/iou3d_nms_api.cpp',
                    'src/iou3d_nms.cpp',
                    'src/iou3d_nms_kernel.cu',
                ]
            ),
            # ROI-aware point pooling.
            make_cuda_ext(
                name='roiaware_pool3d_cuda',
                module='pcdet.ops.roiaware_pool3d',
                sources=[
                    'src/roiaware_pool3d.cpp',
                    'src/roiaware_pool3d_kernel.cu',
                ]
            ),
            # ROI point pooling.
            make_cuda_ext(
                name='roipoint_pool3d_cuda',
                module='pcdet.ops.roipoint_pool3d',
                sources=[
                    'src/roipoint_pool3d.cpp',
                    'src/roipoint_pool3d_kernel.cu',
                ]
            ),
            # PointNet++ ops (stacked-batch variant).
            make_cuda_ext(
                name='pointnet2_stack_cuda',
                module='pcdet.ops.pointnet2.pointnet2_stack',
                sources=[
                    'src/pointnet2_api.cpp',
                    'src/ball_query.cpp',
                    'src/ball_query_gpu.cu',
                    'src/group_points.cpp',
                    'src/group_points_gpu.cu',
                    'src/sampling.cpp',
                    'src/sampling_gpu.cu',
                    'src/interpolate.cpp',
                    'src/interpolate_gpu.cu',
                    'src/voxel_query.cpp',
                    'src/voxel_query_gpu.cu',
                    'src/vector_pool.cpp',
                    'src/vector_pool_gpu.cu'
                ],
            ),
            # PointNet++ ops (fixed-batch variant).
            make_cuda_ext(
                name='pointnet2_batch_cuda',
                module='pcdet.ops.pointnet2.pointnet2_batch',
                sources=[
                    'src/pointnet2_api.cpp',
                    'src/ball_query.cpp',
                    'src/ball_query_gpu.cu',
                    'src/group_points.cpp',
                    'src/group_points_gpu.cu',
                    'src/interpolate.cpp',
                    'src/interpolate_gpu.cu',
                    'src/sampling.cpp',
                    'src/sampling_gpu.cu',
                ],
            ),
        ],
    )
| 3,945
| 31.344262
| 83
|
py
|
3DTrans
|
3DTrans-master/tools/train_active_CLUE.py
|
import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_CLUE import train_active_model_target
from test import repeat_eval_ckpt
import math
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse command-line arguments and load the YAML config into the global `cfg`.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # FIX: defaults changed from 2/15 to None so the `is None` fallbacks in
    # main() (cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU / NUM_EPOCHS) actually take
    # effect, matching the sibling training scripts in this directory.
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    # BUG FIX: main() reads args.num_epochs_to_eval before repeat_eval_ckpt(),
    # but this parser never defined it, causing an AttributeError at the
    # evaluation stage.  Added with the same default as the sibling scripts.
    parser.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point: active domain-adaptation training (CLUE) followed by evaluation.

    Builds source/target dataloaders, trains via train_active_model_target(),
    then evaluates the last checkpoints on the source test split.
    """
    args, cfg = parse_config()
    # --- distributed setup -------------------------------------------------
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # Per-GPU batch size: fall back to the config value when not given,
    # otherwise split the requested total evenly across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # --- output directories and logging ------------------------------------
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'  # sampled-target lists from active learning
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config alongside the run outputs (rank 0 only).
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # Explicit checkpoint; optimizer state is NOT restored (optimizer=None).
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: resume from the newest one in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    # Scheduler iteration count is driven by the annotation budget, not the
    # dataloader length (see the commented alternative above).
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=cfg['ANNOTATION_BUDGET'], total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # NOTE(review): ckpt_save_interval/max_ckpt_save_num/merge flags are
    # hard-coded here instead of using the corresponding args values — confirm
    # this is intentional.
    train_active_model_target(
        model=model,
        optimizer=optimizer_detector,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_detector,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        annotation_budget=cfg.ANNOTATION_BUDGET,
        target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
        sample_save_path=target_list_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    # Release shared-memory caches, if the datasets used them.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # Evaluation uses the SOURCE test split (cfg.DATA_CONFIG, training=False).
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): args.num_epochs_to_eval must be defined by parse_config()
    # in this file — verify the parser provides it.
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
                           0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    main()
| 10,579
| 42.00813
| 169
|
py
|
3DTrans
|
3DTrans-master/tools/train_multi_db.py
|
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader_mdf, build_dataloader
from pcdet.models import build_network_multi_db, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_multi_db_utils import train_model
def parse_config():
    """Parse command-line arguments and load the YAML config into the global `cfg`.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--frozen_backbone', action='store_true', default=False, help='froze the backbone when training')
    parser.add_argument('--source_one_name', type=str, default="nusc", help='enter the name of the first dataset of merged datasets')
    # batch_size/epochs default to None so main() falls back to cfg.OPTIMIZATION.
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point: multi-dataset (two-source) joint training, then evaluation.

    Builds two source dataloaders, trains a shared network with merged losses
    via train_model(), then evaluates on the first source's test split.
    """
    args, cfg = parse_config()
    # Only these three source datasets are supported as the "first" dataset.
    if args.source_one_name not in ["waymo", "nusc", "kitti"]:
        raise RuntimeError('Does not exist for source_one_name')
    # --- distributed setup -------------------------------------------------
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # Per-GPU batch size: config default, or the requested total split across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    # --- output directories and logging ------------------------------------
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'  # pseudo-label directory passed to train_model()
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config alongside the run outputs (rank 0 only).
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    logger.info('**********************Using Two DataLoader and Merge Loss**********************')
    logger.info('**********************VALUE of source_one_name= %s**********************' % args.source_one_name)
    # First source dataset.
    source_set, source_loader, source_sampler = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # Second source dataset (separate config and class names).
    source_set_2, source_loader_2, source_sampler_2 = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # add the dataset_source flag into uni3d_norm layer, for training stage, we use the default value of 1
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_3D_MoE', None):
        cfg.MODEL.DENSE_3D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_2D_MoE', None):
        cfg.MODEL.DENSE_2D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": 1})
    model = build_network_multi_db(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        dataset=source_set, dataset_s2=source_set_2, source_one_name=args.source_one_name)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # NOTE(review): n_parameters is computed but never used or logged below.
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: resume from the newest one in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if args.frozen_backbone:
        # NOTE(review): format string below contains a stray '%s' with no
        # argument; the literal '%s' will appear in the log.
        logger.info('**********************Note that Frozen Backbone: %s**********************')
        model.frozen_model(model)
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()],
                                                    find_unused_parameters=True)
        # model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    # One epoch covers the longer of the two source dataloaders.
    max_len_dataset = len(source_loader) if len(source_loader) > len(source_loader_2) else len(source_loader_2)
    total_iters_each_epoch = max_len_dataset if not args.merge_all_iters_to_one_epoch \
        else max_len_dataset // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    train_func = train_model
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_func(
        model,
        optimizer,
        source_loader,
        source_loader_2,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        ps_label_dir=ps_label_dir,
        source_sampler=source_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        logger=logger,
    )
    # Release shared-memory caches, if the first source dataset used them.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # Evaluation uses the FIRST source's test split (cfg.DATA_CONFIG, training=False).
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    main()
| 11,953
| 44.800766
| 142
|
py
|
3DTrans
|
3DTrans-master/tools/train_pointcontrast.py
|
print('program started',)  # startup marker emitted before the heavy framework imports below
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_unsupervised_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from unsupervised_utils.pointcontrast_utils import train_model
def parse_config():
    """Parse command-line arguments and load the YAML config into the global `cfg`.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # batch_size/epochs default to None so main() falls back to cfg.OPTIMIZATION.
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for (semi-/unsupervised) training.

    Parses CLI args + global cfg, sets up (optionally distributed) training,
    builds the unlabeled/test dataloaders, network and optimizer, trains, and
    finally evaluates the last checkpoints. Side effects: creates the output
    and ckpt directories, writes log/tensorboard files, copies the config,
    and saves checkpoints.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # The CLI batch size is global; split it evenly across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        # Offset by rank so each process is reproducible but distinct.
        common_utils.set_random_seed(666 + cfg.LOCAL_RANK)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the results for reproducibility.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # From here on args.batch_size is a per-split dict, no longer an int.
    args.batch_size = {
        'unlabeled': args.batch_size,
        'test': args.batch_size
    }
    # build unsupervised dataloader
    datasets, dataloaders, samplers = build_unsupervised_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        root_path=cfg.DATA_CONFIG.DATA_PATH,
        dist=dist_train, workers=args.workers,
        logger=logger,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=datasets['unlabeled'])
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # Trainable-parameter count (computed but currently unused/unlogged).
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the newest checkpoint in the output dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # NOTE(review): iters-per-epoch comes from len(datasets['unlabeled']), i.e.
    # the dataset length, not len(dataloaders['unlabeled']) — confirm this is
    # what build_scheduler expects here.
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=len(datasets['unlabeled']), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    voxel_size = cfg.DATA_CONFIG.VOXEL_SIZE
    point_cloud_range = cfg.DATA_CONFIG.POINT_CLOUD_RANGE
    train_model(
        model=model,
        optimizer=optimizer,
        train_loader=dataloaders['unlabeled'],
        lr_scheduler=lr_scheduler,
        cfg=cfg.OPTIMIZATION,
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        train_sampler=samplers['unlabeled'],
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
    )
    # Release shared-memory buffers held by dataset workers, if enabled.
    if hasattr(datasets['unlabeled'], 'use_shared_memory') and datasets['unlabeled'].use_shared_memory:
        datasets['unlabeled'].clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = datasets['test'], dataloaders['test'], samplers['test']
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    main()
| 9,328
| 44.286408
| 169
|
py
|
3DTrans
|
3DTrans-master/tools/test.py
|
import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
def parse_config():
    """Build the evaluation CLI, parse argv, and populate the global `cfg`.

    Also seeds numpy for deterministic evaluation.

    Returns:
        (args, cfg): the parsed namespace and the populated global config.
    """
    ap = argparse.ArgumentParser(description='arg parser')
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    ap.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    ap.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    ap.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')
    parsed = ap.parse_args()

    cfg_from_yaml_file(parsed.cfg_file, cfg)
    cfg.TAG = Path(parsed.cfg_file).stem
    # Drop the leading 'cfgs' directory and the trailing 'xxxx.yaml' file name.
    cfg.EXP_GROUP_PATH = '/'.join(parsed.cfg_file.split('/')[1:-1])
    np.random.seed(1024)
    if parsed.set_cfgs is not None:
        cfg_from_list(parsed.set_cfgs, cfg)
    return parsed, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load the checkpoint given by `args.ckpt` into `model` and evaluate it once.

    Args:
        model: detector network built by `build_network` (weights loaded here).
        test_loader: dataloader over the test split.
        args: parsed CLI namespace; reads `args.ckpt` and `args.save_to_file`.
        eval_output_dir: directory where results are written.
        logger: experiment logger.
        epoch_id: epoch identifier used to tag results.
        dist_test: whether evaluation runs in distributed mode.
    """
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # NOTE(review): leftover debug print — consider removing or routing through
    # the logger.
    print("******model for testing",model)
    # start evaluation
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in `ckpt_dir` that has not been evaluated yet.

    Args:
        ckpt_dir: directory containing `*checkpoint_epoch_*.pth` files.
        ckpt_record_file: text file with one already-evaluated epoch id per line.
        args: namespace with `start_epoch`; checkpoints from earlier epochs are
            skipped.

    Returns:
        (epoch_id, ckpt_path) for the first unevaluated checkpoint (candidates
        ordered by file mtime, epoch_id as a string), or (-1, None) if every
        eligible checkpoint has already been evaluated.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Use a context manager so the record file is closed deterministically
    # (the original `open(...).readlines()` leaked the file handle).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]
    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if len(num_list) == 0:
            continue
        epoch_id = num_list[-1]
        # Skip optimizer-state checkpoints ('..._optim.pth' style names).
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll `ckpt_dir` and evaluate every new checkpoint as it appears.

    Keeps a record file of already-evaluated epoch ids and loops until no new
    checkpoint shows up within `args.max_waiting_mins` minutes (after at least
    one evaluation attempt). Intended to run concurrently with training.

    Args:
        model: detector network; weights reloaded for each checkpoint.
        test_loader: dataloader over the test split.
        args: parsed CLI namespace (`start_epoch`, `max_waiting_mins`,
            `save_to_file`).
        eval_output_dir: base directory for per-epoch results and the record file.
        logger: experiment logger.
        ckpt_dir: directory to watch for `*checkpoint_epoch_*.pth` files.
        dist_test: whether evaluation runs in distributed mode.
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    # Touch the record file so later reads/appends always succeed.
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log (only rank 0 writes; guarded again below before use)
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            # Give up only after the waiting budget is exhausted AND we have
            # already been through at least one wait cycle.
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Evaluation entry point.

    Builds the test dataloader (target-domain config if `DATA_CONFIG_TAR` is
    set, source otherwise) and the network, then either evaluates a single
    checkpoint (`args.ckpt`) or keeps evaluating all checkpoints found in the
    checkpoint directory (`--eval_all`).
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # The CLI batch size is global; split it evenly across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # Derive an epoch id from the digits embedded in the checkpoint name.
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    # Prefer the target-domain data config when present (domain adaptation).
    if cfg.get('DATA_CONFIG_TAR', None):
        test_set, test_loader, sampler = build_dataloader(
            dataset_cfg=cfg.DATA_CONFIG_TAR,
            class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_test, workers=args.workers, logger=logger, training=False
        )
    else:
        test_set, test_loader, sampler = build_dataloader(
            dataset_cfg=cfg.DATA_CONFIG,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_test, workers=args.workers, logger=logger, training=False
        )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
# Script entry point.
if __name__ == '__main__':
    main()
| 8,740
| 40.42654
| 120
|
py
|
3DTrans
|
3DTrans-master/tools/train_ada.py
|
import os
import math
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from tools.test import repeat_eval_ckpt
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_source_utils import train_active_model_source_only
from train_utils.train_active_target_utils import train_active_model_dual_tar
from test import repeat_eval_ckpt
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Build the training CLI for the active-adaptation script, parse argv,
    and populate the global `cfg`.

    Returns:
        (args, cfg): the parsed namespace and the populated global config.
    """
    cli = argparse.ArgumentParser(description='arg parser')
    cli.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    cli.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    cli.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    cli.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    cli.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    cli.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    cli.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    cli.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    cli.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    cli.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    cli.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    cli.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    cli.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    cli.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    cli.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    cli.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                     help='set extra config keys if needed')
    cli.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    cli.add_argument('--start_epoch', type=int, default=0, help='')
    cli.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    cli.add_argument('--save_to_file', action='store_true', default=False, help='')
    parsed = cli.parse_args()

    cfg_from_yaml_file(parsed.cfg_file, cfg)
    cfg.TAG = Path(parsed.cfg_file).stem
    # Drop the leading 'cfgs' directory and the trailing yaml file name.
    cfg.EXP_GROUP_PATH = '/'.join(parsed.cfg_file.split('/')[1:-1])
    if parsed.set_cfgs is not None:
        cfg_from_list(parsed.set_cfgs, cfg)
    return parsed, cfg
def main():
    """Entry point for active domain-adaptation training.

    Builds source/target (and, in the target stage, source-sample)
    dataloaders, a detector with a domain discriminator, trains with the
    routine selected by `cfg.ADA_STAGE`, then evaluates the last checkpoints.

    Fixes vs. the original:
      * the non-TARGET branch tuple-unpacked a bare ``None`` into three names
        (`a, b, c = None` raises TypeError at runtime); the three names are
        now each bound to ``None``, mirroring the equivalent code in
        train_uda.py.
      * `train_active_model_dual_tar` was handed the source-sample *loader*
        as its `source_sample_sampler` argument; it now receives the sampler.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print("None args.launcher********", args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print("args.launcher********", args.launcher)
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # The CLI batch size is global; split it evenly across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the results for reproducibility.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    if cfg.get('ADA_STAGE', None) == 'TARGET':
        source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
            dataset_cfg=cfg.DATA_CONFIG,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            info_path=cfg.DATA_CONFIG.FILE_PATH,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )
    else:
        # BUGFIX: the original `a, b, c = None` raised TypeError (cannot
        # unpack None); bind each name individually instead.
        source_sample_set = source_sample_loader = source_sample_sampler = None
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # Separate optimizers for the detector and its domain discriminator.
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_list = [optimizer_detector, optimizer_discriminator]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # Optimizer state is intentionally not restored (optimizer=None).
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the newest checkpoint in the output dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    if cfg.get('ADA_STAGE', None) == 'SOURCE':
        # In the source stage iterate over the annotation budget.
        total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    else:
        # NOTE(review): assumes ADA_STAGE == 'TARGET' here; with no ADA_STAGE
        # this is len(None) and fails — confirm the config always sets a stage.
        total_iters_each_epoch = len(source_sample_loader)
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    if cfg.get('ADA_STAGE', None) == 'SOURCE':
        train_active_model_source_only(
            model=model,
            optimizer=optimizer_list,
            source_train_loader=source_loader,
            target_train_loader=target_loader,
            sample_loader=None,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler_list,
            optim_cfg=cfg.OPTIMIZATION,
            total_iters_each_epoch=total_iters_each_epoch,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            sample_epoch=cfg.SAMPLE_EPOCHS,
            source_budget=cfg.SOURCE_THRESHOD,
            source_file_path=cfg.DATA_CONFIG.FILE_PATH,
            sample_save_path=target_list_dir,
            cfg=cfg,
            batch_size=args.batch_size,
            workers=args.workers,
            dist_train=dist_train,
            source_sampler=source_sampler,
            target_sampler=target_sampler,
            sample_sampler=None,
            lr_warmup_scheduler=None,
            ckpt_save_interval=1,
            max_ckpt_save_num=50,
            merge_all_iters_to_one_epoch=False,
            logger=logger,
            ema_model=None
        )
    else:
        train_active_model_dual_tar(
            model=model,
            optimizer=optimizer_list,
            source_train_loader=source_loader,
            target_train_loader=target_loader,
            source_sample_loader=source_sample_loader,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler_list,
            optim_cfg=cfg.OPTIMIZATION,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            sample_epoch=cfg.SAMPLE_EPOCHS,
            annotation_budget=cfg.ANNOTATION_BUDGET,
            target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
            sample_save_path=target_list_dir,
            cfg=cfg,
            batch_size=args.batch_size,
            workers=args.workers,
            dist_train=dist_train,
            source_sampler=source_sampler,
            target_sampler=target_sampler,
            # BUGFIX: the original passed the loader here; the keyword asks
            # for the sampler.
            source_sample_sampler=source_sample_sampler,
            lr_warmup_scheduler=None,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=50,
            merge_all_iters_to_one_epoch=False,
            logger=logger,
            ema_model=None
        )
    # Release shared-memory buffers held by dataset workers, if enabled.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    main()
| 13,489
| 42.376206
| 169
|
py
|
3DTrans
|
3DTrans-master/tools/_init_path.py
|
import sys
# Prepend the repository root so `pcdet` and friends are importable when the
# tools/ scripts are run directly from their own directory.
sys.path.insert(0, '../')
| 36
| 17.5
| 25
|
py
|
3DTrans
|
3DTrans-master/tools/train_uda.py
|
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_st_utils import train_model_st
def parse_config():
    """Build the training CLI for the UDA self-training script, parse argv,
    and populate the global `cfg`.

    Returns:
        (args, cfg): the parsed namespace and the populated global config.
    """
    ap = argparse.ArgumentParser(description='arg parser')
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    ap.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    ap.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    ap.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    ap.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    ap.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')
    parsed = ap.parse_args()

    cfg_from_yaml_file(parsed.cfg_file, cfg)
    cfg.TAG = Path(parsed.cfg_file).stem
    # Drop the leading 'cfgs' directory and the trailing 'xxxx.yaml' file name.
    cfg.EXP_GROUP_PATH = '/'.join(parsed.cfg_file.split('/')[1:-1])
    if parsed.set_cfgs is not None:
        cfg_from_list(parsed.set_cfgs, cfg)
    return parsed, cfg
def main():
    """Entry point for UDA self-training.

    Builds the source dataloader (plus a target dataloader when
    `cfg.SELF_TRAIN` is set), the network and optimizer, runs
    `train_model_st`, then evaluates the last checkpoints on the source test
    split. Side effects: creates output/ckpt/ps_label directories, writes
    logs and tensorboard events, saves checkpoints.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # The CLI batch size is global; split it evenly across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    # Directory for pseudo labels generated during self-training.
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the results for reproducibility.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # Target-domain loader only exists when self-training is enabled.
    if cfg.get('SELF_TRAIN', None):
        target_set, target_loader, target_sampler = build_dataloader(
            cfg.DATA_CONFIG_TAR, cfg.DATA_CONFIG_TAR.CLASS_NAMES, args.batch_size,
            dist_train, workers=args.workers, logger=logger, training=True
        )
    else:
        target_set = target_loader = target_sampler = None
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the newest checkpoint in the output dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    # Iterations per epoch follow the loader actually being trained on.
    if cfg.get('SELF_TRAIN', None):
        total_iters_each_epoch = len(target_loader) if not args.merge_all_iters_to_one_epoch \
            else len(target_loader) // args.epochs
    else:
        total_iters_each_epoch = len(source_loader) if not args.merge_all_iters_to_one_epoch \
            else len(source_loader) // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # select proper trainer
    train_func = train_model_st
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_func(
        model,
        optimizer,
        source_loader,
        target_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        ps_label_dir=ps_label_dir,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        logger=logger,
        ema_model=None
    )
    # Release shared-memory buffers held by dataset workers, if enabled.
    if cfg.get('SELF_TRAIN', None):
        if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
            target_set.clean_shared_memory()
    else:
        if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
            source_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    main()
| 10,156
| 42.592275
| 125
|
py
|
3DTrans
|
3DTrans-master/tools/train_semi.py
|
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
import copy
import torch
import torch.distributed as dist
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_semi_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_semi_utils import train_model
from ssl_utils.semi_train_utils import train_ssl_model
from test import repeat_eval_ckpt
from eval_utils.eval_utils import eval_one_epoch
def parse_config():
    """Parse CLI options and populate the global ``cfg`` from the YAML file.

    Returns:
        tuple: (parsed argparse.Namespace, the populated global cfg object).
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    arg_parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    arg_parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    arg_parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    arg_parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    arg_parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    arg_parser.add_argument('--tcp_port', type=int, default=8888, help='tcp port for distrbuted training')
    arg_parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    arg_parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    arg_parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    arg_parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    arg_parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    arg_parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    arg_parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                            help='set extra config keys if needed')
    arg_parser.add_argument('--max_waiting_mins', type=int, default=1, help='max waiting minutes')
    arg_parser.add_argument('--start_epoch', type=int, default=0, help='')
    arg_parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    arg_parser.add_argument('--runs_on', type=str, default='server', choices=['server', 'cloud'],help='runs on server or cloud')

    args = arg_parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # Drop the leading 'cfgs' directory and the yaml file name to form the group path.
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
class DistStudent(nn.Module):
    """Wrapper that runs the student network on a labeled and an unlabeled
    batch within a single forward call (required so DDP sees one forward)."""

    def __init__(self, student):
        super().__init__()
        # The wrapped network is exposed under .onepass, the attribute name
        # the rest of the training code expects.
        self.onepass = student

    def forward(self, ld_batch, ud_batch):
        labeled_out = self.onepass(ld_batch)
        unlabeled_out = self.onepass(ud_batch)
        return labeled_out, unlabeled_out
class DistTeacher(nn.Module):
    """Wrapper that runs the teacher network on labeled/unlabeled batches in
    one forward call; the labeled batch may be None (unlabeled-only step)."""

    def __init__(self, teacher):
        super().__init__()
        # Keep the wrapped network under the attribute name used elsewhere.
        self.onepass = teacher

    def forward(self, ld_batch, ud_batch):
        if ld_batch is None:
            # No labeled data for this step: only process the unlabeled batch.
            return None, self.onepass(ud_batch)
        return self.onepass(ld_batch), self.onepass(ud_batch)
def main():
    """Two-stage semi-supervised training entry point.

    Stage I pre-trains a detector on labeled data (or loads / evaluates an
    existing pre-trained checkpoint); Stage II trains a student model by
    gradient descent while a teacher model provides pseudo labels / EMA
    targets. Both models are evaluated at the end.
    """
    args, cfg = parse_config()
    if args.runs_on == 'cloud':
        # Cloud runs read data from a different root path.
        cfg.DATA_CONFIG.DATA_PATH = cfg.DATA_CONFIG.CLOUD_DATA_PATH
    if args.launcher == 'none':
        dist_train = False
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.fix_random_seed:
        common_utils.set_random_seed(666)

    # Output / checkpoint directory layout.
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    if args.runs_on == 'cloud':
        output_dir = Path('/cache/output/') / cfg.EXP_GROUP_PATH / cfg.TAG
    pretrain_ckpt_dir = output_dir / 'pretrain_ckpt'
    ssl_ckpt_dir = output_dir / 'ssl_ckpt'
    student_ckpt_dir = output_dir / 'ssl_ckpt' / 'student'
    teacher_ckpt_dir = output_dir / 'ssl_ckpt' / 'teacher'
    output_dir.mkdir(parents=True, exist_ok=True)
    pretrain_ckpt_dir.mkdir(parents=True, exist_ok=True)
    student_ckpt_dir.mkdir(parents=True, exist_ok=True)  # also creates ssl_ckpt
    teacher_ckpt_dir.mkdir(parents=True, exist_ok=True)

    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None

    # Per-phase batch sizes (per GPU).
    batch_size = {
        'pretrain': cfg.OPTIMIZATION.PRETRAIN.BATCH_SIZE_PER_GPU,
        'labeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.LD_BATCH_SIZE_PER_GPU,
        'unlabeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.UD_BATCH_SIZE_PER_GPU,
        'test': cfg.OPTIMIZATION.TEST.BATCH_SIZE_PER_GPU,
    }

    # -----------------------create dataloader & network & optimizer---------------------------
    datasets, dataloaders, samplers = build_semi_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=batch_size,
        dist=dist_train,
        root_path=cfg.DATA_CONFIG.DATA_PATH,
        workers=args.workers,
        logger=logger,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
    )

    # --------------------------------stage I pretraining---------------------------------------
    logger.info('************************Stage I Pretraining************************')
    MODEL_PRETRAINED = copy.deepcopy(cfg.MODEL)
    pretrain_model = build_network(model_cfg=MODEL_PRETRAINED, num_class=len(cfg.CLASS_NAMES), dataset=datasets['pretrain'])
    pretrain_model.set_model_type('origin')

    if cfg.get('USE_PRETRAIN_MODEL', False):
        # A pre-trained checkpoint is supplied: load it and only evaluate.
        pretrain_ckpt = cfg.PRETRAIN_CKPT
        if args.runs_on == 'cloud':
            pretrain_ckpt = cfg.CLOUD_PRETRAIN_CKPT
        pretrain_model.load_params_from_file(filename=pretrain_ckpt, logger=logger, to_cpu=dist_train)
        pretrain_model.cuda()
        pretrain_model.eval()  # before wrap to DistributedDataParallel to support fixed some parameters
        if dist_train:
            pretrain_model = nn.parallel.DistributedDataParallel(pretrain_model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
        logger.info(pretrain_model)
        eval_pretrain_dir = output_dir / 'eval' / 'eval_with_pretraining'
        eval_pretrain_dir.mkdir(parents=True, exist_ok=True)
        eval_one_epoch(cfg, pretrain_model, dataloaders['test'], -1, logger, dist_test=dist_train, save_to_file=False, result_dir=eval_pretrain_dir)
    else:
        # No checkpoint supplied: run the pre-training loop from scratch.
        pretrain_model.cuda()
        pretrain_optimizer = build_optimizer(pretrain_model, cfg.OPTIMIZATION.PRETRAIN)
        pretrain_model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
        if dist_train:
            pretrain_model = nn.parallel.DistributedDataParallel(pretrain_model, device_ids=[
                cfg.LOCAL_RANK % torch.cuda.device_count()])
        logger.info(pretrain_model)

        last_epoch = -1
        start_epoch = it = 0
        pretrain_lr_scheduler, pretrain_lr_warmup_scheduler = build_scheduler(
            pretrain_optimizer, total_iters_each_epoch=len(dataloaders['pretrain']),
            total_epochs=cfg.OPTIMIZATION.PRETRAIN.NUM_EPOCHS,
            last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.PRETRAIN
        )

        logger.info('**********************Start pre-training %s/%s(%s)**********************'
                    % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        train_model(
            pretrain_model,
            pretrain_optimizer,
            dataloaders['pretrain'],
            model_func=model_fn_decorator(),
            lr_scheduler=pretrain_lr_scheduler,
            optim_cfg=cfg.OPTIMIZATION.PRETRAIN,
            start_epoch=start_epoch,
            total_epochs=cfg.OPTIMIZATION.PRETRAIN.NUM_EPOCHS,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=pretrain_ckpt_dir,
            train_sampler=samplers['pretrain'],
            lr_warmup_scheduler=pretrain_lr_warmup_scheduler,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=args.max_ckpt_save_num,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
        )
        logger.info('**********************End pre-training %s/%s(%s)**********************\n\n\n'
                    % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

        logger.info('**********************Start evaluation for pre-training %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        eval_pretrain_dir = output_dir / 'eval' / 'eval_with_pretraining'
        eval_pretrain_dir.mkdir(parents=True, exist_ok=True)
        # Only evaluate the last 10 pre-training epochs.
        args.start_epoch = cfg.OPTIMIZATION.PRETRAIN.NUM_EPOCHS - 10
        repeat_eval_ckpt(
            model=pretrain_model.module if dist_train else pretrain_model,
            test_loader=dataloaders['test'],
            args=args,
            eval_output_dir=eval_pretrain_dir,
            logger=logger,
            ckpt_dir=pretrain_ckpt_dir,
            dist_test=dist_train
        )
        logger.info('**********************End evaluation for pre-training %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

    # --------------------------------stage II SSL training---------------------------------------
    logger.info('************************Stage II SSL training************************')
    MODEL_TEACHER = copy.deepcopy(cfg.MODEL)
    teacher_model = build_network(model_cfg=MODEL_TEACHER, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    # NOTE: teacher parameters are frozen below via requires_grad = False
    # (a previous revision detached them here instead).
    MODEL_STUDENT = copy.deepcopy(cfg.MODEL)
    student_model = build_network(model_cfg=MODEL_STUDENT, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    teacher_model.set_model_type('teacher')
    student_model.set_model_type('student')
    teacher_model.cuda()
    student_model.cuda()

    # only update student model by gradient descent, teacher model are updated by EMA
    student_optimizer = build_optimizer(student_model, cfg.OPTIMIZATION.SEMI_SUP_LEARNING.STUDENT)

    # load checkpoint if it is possible
    last_epoch = -1
    start_epoch = it = 0
    based_on_pretrained = True
    teacher_ckpt_list = glob.glob(str(teacher_ckpt_dir / '*checkpoint_epoch_*.pth'))
    student_ckpt_list = glob.glob(str(student_ckpt_dir / '*checkpoint_epoch_*.pth'))
    if len(teacher_ckpt_list) > 0 and len(student_ckpt_list) > 0:
        # Resume an interrupted SSL run from the newest teacher/student pair.
        based_on_pretrained = False
        teacher_ckpt_list.sort(key=os.path.getmtime)
        student_ckpt_list.sort(key=os.path.getmtime)
        # NOTE(review): both loads receive student_optimizer; presumably the
        # teacher checkpoint carries no optimizer state worth restoring — confirm.
        it, start_epoch = teacher_model.load_params_with_optimizer(
            teacher_ckpt_list[-1], to_cpu=dist_train, optimizer=student_optimizer, logger=logger
        )
        it, start_epoch = student_model.load_params_with_optimizer(
            student_ckpt_list[-1], to_cpu=dist_train, optimizer=student_optimizer, logger=logger
        )
        last_epoch = start_epoch + 1

    if based_on_pretrained:
        # Initialize both teacher and student from the Stage I weights.
        if cfg.get('USE_PRETRAIN_MODEL', False):
            pretrained_model = cfg.PRETRAIN_CKPT
            if args.runs_on == 'cloud':
                pretrained_model = cfg.CLOUD_PRETRAIN_CKPT
        else:
            ckpt_list = glob.glob(str(pretrain_ckpt_dir / '*checkpoint_epoch_*.pth'))
            ckpt_list.sort(key=os.path.getmtime)
            pretrained_model = ckpt_list[-1]
        # BUG FIX: the original passed to_cpu=dist, i.e. the torch.distributed
        # *module* (always truthy), which forced a CPU load even in single-GPU
        # runs. Use the dist_train flag like every other load in this script.
        teacher_model.load_params_from_file(filename=pretrained_model, to_cpu=dist_train, logger=logger)
        student_model.load_params_from_file(filename=pretrained_model, to_cpu=dist_train, logger=logger)

    if dist_train:
        student_model = DistStudent(student_model)  # add wrapper for dist training
        student_model = nn.parallel.DistributedDataParallel(student_model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
        # teacher doesn't need dist train
        teacher_model = DistTeacher(teacher_model)
        teacher_model = nn.parallel.DistributedDataParallel(teacher_model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    student_model.train()

    # Notes: we found for pseudo labels, teacher_model.eval() is better;
    # for EMA update and consistency, teacher_model.train() is better.
    if cfg.OPTIMIZATION.SEMI_SUP_LEARNING.TEACHER.NUM_ITERS_PER_UPDATE == -1:  # for pseudo label
        teacher_model.eval()  # Set to eval mode to avoid BN update and dropout
    else:  # for EMA teacher with consistency
        teacher_model.train()  # Set to train mode
    for t_param in teacher_model.parameters():
        # Teacher weights are never updated by the optimizer (EMA only).
        t_param.requires_grad = False
    logger.info(student_model)

    # NOTE(review): the comment said "use unlabeled data as epoch counter" but
    # the scheduler length comes from the *labeled* loader — confirm intent.
    student_lr_scheduler, student_lr_warmup_scheduler = build_scheduler(
        student_optimizer, total_iters_each_epoch=len(dataloaders['labeled']), total_epochs=cfg.OPTIMIZATION.SEMI_SUP_LEARNING.NUM_EPOCHS,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.SEMI_SUP_LEARNING.STUDENT
    )

    logger.info('**********************Start ssl-training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_ssl_model(
        teacher_model=teacher_model,
        student_model=student_model,
        student_optimizer=student_optimizer,
        labeled_loader=dataloaders['labeled'],
        unlabeled_loader=dataloaders['unlabeled'],
        lr_scheduler=student_lr_scheduler,
        ssl_cfg=cfg.OPTIMIZATION.SEMI_SUP_LEARNING,
        start_epoch=start_epoch,
        total_epochs=cfg.OPTIMIZATION.SEMI_SUP_LEARNING.NUM_EPOCHS,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ssl_ckpt_dir,
        labeled_sampler=samplers['labeled'],
        unlabeled_sampler=samplers['unlabeled'],
        lr_warmup_scheduler=student_lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        dist=dist_train
    )
    logger.info('**********************End ssl-training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

    logger.info('**********************Start evaluation for student model %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    eval_ssl_dir = output_dir / 'eval' / 'eval_with_student_model'
    eval_ssl_dir.mkdir(parents=True, exist_ok=True)
    # Only evaluate the last 25 SSL epochs.
    args.start_epoch = cfg.OPTIMIZATION.SEMI_SUP_LEARNING.NUM_EPOCHS - 25
    repeat_eval_ckpt(
        model=student_model.module.onepass if dist_train else student_model,
        test_loader=dataloaders['test'],
        args=args,
        eval_output_dir=eval_ssl_dir,
        logger=logger,
        ckpt_dir=ssl_ckpt_dir / 'student',
        dist_test=dist_train
    )
    logger.info('**********************End evaluation for student model %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

    logger.info('**********************Start evaluation for teacher model %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    eval_ssl_dir = output_dir / 'eval' / 'eval_with_teacher_model'
    eval_ssl_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = cfg.OPTIMIZATION.SEMI_SUP_LEARNING.NUM_EPOCHS - 25
    if dist_train:
        teacher_model.module.onepass.set_model_type('origin')  # ret filtered boxes
    else:
        teacher_model.set_model_type('origin')
    for t_param in teacher_model.parameters():  # Add this to avoid errors
        t_param.requires_grad = True
    repeat_eval_ckpt(
        model=teacher_model.module.onepass if dist_train else teacher_model,
        test_loader=dataloaders['test'],
        args=args,
        eval_output_dir=eval_ssl_dir,
        logger=logger,
        ckpt_dir=ssl_ckpt_dir / 'teacher',
        dist_test=dist_train
    )
    logger.info('**********************End evaluation for teacher model %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point: run two-stage semi-supervised training when executed directly.
if __name__ == '__main__':
    main()
| 17,324
| 46.465753
| 148
|
py
|
3DTrans
|
3DTrans-master/tools/train_multi_db_merge_loss.py
|
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
from train_utils.train_multi_db_loss_merge import train_multi_db_model
def parse_config():
    """Parse CLI options and populate the global ``cfg`` from the YAML file.

    Returns:
        tuple: (parsed argparse.Namespace, the populated global cfg object).
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    arg_parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    arg_parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    arg_parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    arg_parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    arg_parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    arg_parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    arg_parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    arg_parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    arg_parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    arg_parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    arg_parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    arg_parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    arg_parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    arg_parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    arg_parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                            help='set extra config keys if needed')
    arg_parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    arg_parser.add_argument('--start_epoch', type=int, default=0, help='')
    arg_parser.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    arg_parser.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = arg_parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # Drop the leading 'cfgs' directory and the yaml file name to form the group path.
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Train a detector on one source dataset, or on two datasets with merged
    losses when ``cfg.MULTI_DB`` is set, then evaluate the last checkpoints.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True

    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # --batch_size is the global batch; split it evenly across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs

    if args.fix_random_seed:
        common_utils.set_random_seed(666)

    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)

    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None

    # -----------------------create dataloader & network & optimizer---------------------------
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    if cfg.get('MULTI_DB', None):
        # Second source dataset whose loss is merged with the first.
        logger.info('**********************Using Two DataLoader and Merge Loss**********************')
        source_set_2, source_loader_2, source_sampler_2 = build_dataloader(
            dataset_cfg=cfg.DATA_CONFIG_SRC_2,
            class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )
    else:
        source_set_2 = source_loader_2 = source_sampler_2 = None

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)

    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: resume from the newest one in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1

    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        if cfg.get('MULTI_DB', None):
            model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()],
                                                        broadcast_buffers=False, find_unused_parameters=True)
        else:
            model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)

    # BUG FIX: the original unconditionally evaluated len(source_loader_2),
    # which raises TypeError when cfg.MULTI_DB is unset (the loader is None).
    if source_loader_2 is not None:
        max_len_dataset = max(len(source_loader), len(source_loader_2))
    else:
        max_len_dataset = len(source_loader)
    total_iters_each_epoch = max_len_dataset if not args.merge_all_iters_to_one_epoch \
        else max_len_dataset // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )

    # select proper trainer
    if cfg.get('MULTI_DB', None):
        train_func = train_multi_db_model
    else:
        train_func = train_model

    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    if cfg.get('MULTI_DB', None):
        train_func(
            model,
            optimizer,
            source_loader,
            source_loader_2,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler,
            optim_cfg=cfg.OPTIMIZATION,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            ps_label_dir=ps_label_dir,
            source_sampler=source_sampler,
            target_sampler=source_sampler_2,
            lr_warmup_scheduler=lr_warmup_scheduler,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=args.max_ckpt_save_num,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            logger=logger,
            ema_model=None
        )
    else:
        train_model(
            model,
            optimizer,
            source_loader,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler,
            optim_cfg=cfg.OPTIMIZATION,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            source_sampler=source_sampler,
            lr_warmup_scheduler=lr_warmup_scheduler,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=args.max_ckpt_save_num,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
        )

    # Release shared-memory buffers held by the datasets, if they use any.
    if cfg.get('MULTI_DB', None):
        if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
            source_set.clean_shared_memory()
        # Robustness fix: check the second dataset's own flag instead of
        # cleaning it whenever the first dataset used shared memory.
        if hasattr(source_set_2, 'use_shared_memory') and source_set_2.use_shared_memory:
            source_set_2.clean_shared_memory()
    else:
        if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
            source_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point: run merged-loss multi-dataset training when executed directly.
if __name__ == '__main__':
    main()
| 11,823
| 43.119403
| 125
|
py
|
3DTrans
|
3DTrans-master/tools/demo.py
|
import argparse
import glob
from pathlib import Path
# Prefer Open3D for visualization; fall back to Mayavi when Open3D (or the
# open3d_vis_utils helper) cannot be imported. OPEN3D_FLAG records which
# backend is active so main() knows whether to call mlab.show().
# NOTE(review): the bare except also swallows unrelated import errors inside
# the helper modules — consider narrowing to ImportError.
try:
    import open3d
    from visual_utils import open3d_vis_utils as V
    OPEN3D_FLAG = True
except:
    import mayavi.mlab as mlab
    from visual_utils import visualize_utils as V
    OPEN3D_FLAG = False
import numpy as np
import torch
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
class DemoDataset(DatasetTemplate):
    """Minimal dataset that feeds raw point-cloud files to the model for the demo.

    Accepts either a single file or a directory of files with the given
    extension ('.bin' KITTI-style float32 Nx4, or '.npy' arrays).
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
        """
        Args:
            dataset_cfg: dataset section of the config.
            class_names: list of detection class names.
            training: passed through to DatasetTemplate.
            root_path: Path to a point-cloud file or a directory of them.
            logger: logger instance.
            ext: file extension to search for ('.bin' or '.npy').
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.root_path = root_path
        self.ext = ext
        if self.root_path.is_dir():
            file_list = glob.glob(str(root_path / f'*{self.ext}'))
        else:
            file_list = [self.root_path]
        file_list.sort()
        self.sample_file_list = file_list

    def __len__(self):
        return len(self.sample_file_list)

    def __getitem__(self, index):
        sample_path = self.sample_file_list[index]
        if self.ext == '.bin':
            # Raw float32 binary, 4 values (x, y, z, intensity) per point.
            points = np.fromfile(sample_path, dtype=np.float32).reshape(-1, 4)
        elif self.ext == '.npy':
            points = np.load(sample_path)
        else:
            raise NotImplementedError
        input_dict = {
            'points': points,
            'frame_id': index,
        }
        # Base-class pipeline handles augmentation/voxelization etc.
        return self.prepare_data(data_dict=input_dict)
def parse_config():
    """Parse demo CLI options and populate the global ``cfg`` from the YAML file.

    Returns:
        tuple: (parsed argparse.Namespace, the populated global cfg object).
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                            help='specify the config for demo')
    arg_parser.add_argument('--data_path', type=str, default='demo_data',
                            help='specify the point cloud data file or directory')
    arg_parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    arg_parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')

    args = arg_parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def main():
    """Run the quick demo: load a checkpoint, infer on each sample, visualize."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of 3DTrans-------------------------')

    dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), ext=args.ext, logger=logger
    )
    logger.info(f'Total number of samples: \t{len(dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():
        for idx, sample in enumerate(dataset):
            logger.info(f'Visualized sample index: \t{idx + 1}')
            # Wrap the single sample as a batch of one and move it to the GPU.
            batch = dataset.collate_batch([sample])
            load_data_to_gpu(batch)
            pred_dicts, _ = model.forward(batch)

            V.draw_scenes(
                points=batch['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],
                ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
            )
            if not OPEN3D_FLAG:
                # Mayavi blocks until the window is closed.
                mlab.show(stop=True)

    logger.info('Demo done.')
# Script entry point: run the visualization demo when executed directly.
if __name__ == '__main__':
    main()
| 3,748
| 32.176991
| 118
|
py
|
3DTrans
|
3DTrans-master/tools/test_multi_db_sim.py
|
import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, build_network_multi_db
from pcdet.utils import common_utils
def parse_config():
    """Parse evaluation CLI options, load the YAML config, and seed numpy.

    Returns:
        tuple: (parsed argparse.Namespace, the populated global cfg object).
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    arg_parser.add_argument('--source_1', type=int, default=2, help='if test the source_1 data')
    arg_parser.add_argument('--source_one_name', type=str, default="kitti", help='enter the name of the first dataset of merged datasets')
    arg_parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    arg_parser.add_argument('--workers', type=int, default=0, help='number of workers for dataloader')
    arg_parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    arg_parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    arg_parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    arg_parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    arg_parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    arg_parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                            help='set extra config keys if needed')
    arg_parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    arg_parser.add_argument('--start_epoch', type=int, default=0, help='')
    arg_parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    arg_parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    arg_parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    arg_parser.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = arg_parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # Drop the leading 'cfgs' directory and the yaml file name to form the group path.
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])

    # Fixed seed so repeated evaluations are reproducible.
    np.random.seed(1024)

    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def eval_single_ckpt_parallel(model, show_db, test_loader, test_loader_s2, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Evaluate the checkpoint given by ``args.ckpt`` on both test splits.

    Restores the model weights, moves the model to GPU, then runs one parallel
    evaluation pass over the two dataloaders via eval_utils.
    """
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()

    # start evaluation
    eval_utils.eval_one_epoch_parallel(
        cfg, model, show_db, test_loader, test_loader_s2, epoch_id, logger,
        dist_test=dist_test, result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in ``ckpt_dir`` that has not been evaluated.

    Args:
        ckpt_dir: directory containing '*checkpoint_epoch_*.pth' files.
        ckpt_record_file: text file listing one already-evaluated epoch id per line.
        args: namespace with attribute ``start_epoch``; checkpoints from
            earlier epochs are skipped.

    Returns:
        tuple: (epoch_id_str, ckpt_path) for the next checkpoint to evaluate,
        or (-1, None) when every eligible checkpoint is already recorded.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Fix: close the record file deterministically (the original leaked the handle).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]

    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if not num_list:
            continue

        epoch_id = num_list[-1]
        if 'optim' in epoch_id:
            # Skip optimizer-state checkpoints such as 'checkpoint_epoch_30_optim.pth'.
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt

    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll ``ckpt_dir`` and evaluate every new checkpoint until none arrive.

    Already-evaluated epoch ids are persisted in a record file so a restarted
    job does not repeat work. The loop gives up after ``args.max_waiting_mins``
    of continuous idle waiting, except while still waiting for the very first
    checkpoint (``first_eval``).
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass  # touch the file so later reads never fail
    # tensorboard log
    # NOTE(review): tb_log is bound only on rank 0; the add_scalar below is
    # guarded by the same rank check, so no NameError on other ranks.
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            # Stop only after the idle budget expires AND at least one
            # checkpoint has already been evaluated.
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Evaluate a trained multi-db model on one of its two source datasets.

    Builds both test loaders, injects the dataset-source flag for Dual-BN
    style modules, constructs the two-dataset network and runs a single
    parallel checkpoint evaluation (selected by ``args.source_1``).
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # Derive the epoch id from the checkpoint filename when possible.
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if len(num_list) > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        # FIX: epoch_id was previously undefined on this branch but is used
        # below in eval_single_ckpt_parallel, raising NameError for --eval_all.
        epoch_id = 'no_number'
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    test_set, test_loader_s1, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    test_set_s2, test_loader_s2, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    # add the dataset_source flag into Dual_BN layer
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_2D_MoE', None):
        cfg.MODEL.DENSE_2D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": 1})
    model = build_network_multi_db(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        dataset=test_set, dataset_s2=test_set_s2, source_one_name=args.source_one_name)
    # args.source_1 selects which of the two source datasets to evaluate on.
    if args.source_1 == 1:
        logger.info('**********************Testing Dataset=%s**********************' % test_set.dataset_cfg.DATASET)
        eval_single_ckpt_parallel(model, 1, test_loader_s1, test_loader_s2, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
    elif args.source_1 == 2:
        logger.info('**********************Testing Dataset=%s**********************' % test_set_s2.dataset_cfg.DATASET)
        eval_single_ckpt_parallel(model, 2, test_loader_s1, test_loader_s2, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
# Script entry point: run evaluation when executed directly.
if __name__ == '__main__':
    main()
| 10,010
| 43.691964
| 142
|
py
|
3DTrans
|
3DTrans-master/tools/pseudo_label.py
|
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader, build_semi_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_pseudo_label_utils import train_model
def parse_config():
    """Parse command-line arguments and load the YAML config into ``cfg``.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated global
        config object (TAG and EXP_GROUP_PATH derived from the cfg path).
    """
    ap = argparse.ArgumentParser(description='arg parser')
    # experiment / data options
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    ap.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    ap.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    # distributed training options
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    ap.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    # checkpointing / evaluation options
    ap.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    ap.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = ap.parse_args()
    # Load the YAML config and derive naming metadata from its path.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # remove 'cfgs' and 'xxxx.yaml'
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Semi-supervised (pseudo-label) training entry point.

    Builds labeled/unlabeled/test dataloaders, trains the network with the
    pseudo-label training loop, then re-evaluates the last
    ``args.num_epochs_to_eval`` checkpoints on the test split.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        # Initialise torch.distributed via the chosen launcher helper.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # The CLI batch size is the global one; split it across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    # Output layout: output/<exp_group>/<tag>/<extra_tag>/{ckpt,ps_label,...}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the run artifacts.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # batch_size = {
    #     'pretrain': cfg.OPTIMIZATION.PRETRAIN.BATCH_SIZE_PER_GPU,
    #     'labeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.LD_BATCH_SIZE_PER_GPU,
    #     'unlabeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.UD_BATCH_SIZE_PER_GPU,
    #     'test': cfg.OPTIMIZATION.TEST.BATCH_SIZE_PER_GPU,
    # }
    # Same per-GPU batch size for every split (see the commented per-split
    # alternative above).
    batch_size = {
        'pretrain': args.batch_size,
        'labeled': args.batch_size,
        'unlabeled': args.batch_size,
        'test': args.batch_size,
    }
    # -----------------------create dataloader & network & optimizer---------------------------
    datasets, dataloaders, samplers = build_semi_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=batch_size,
        dist=dist_train,
        root_path=cfg.DATA_CONFIG.DATA_PATH,
        workers=args.workers,
        logger=logger,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: resume from the newest one in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    # Scheduler length follows the unlabeled loader (the longer stream).
    total_iters_each_epoch = len(dataloaders['unlabeled']) if not args.merge_all_iters_to_one_epoch else len(dataloaders['unlabeled']) // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_model(
        model,
        optimizer,
        dataloaders['labeled'],
        dataloaders['unlabeled'],
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        cfg=cfg,
        dist_train=dist_train,
        ps_label_dir=ps_label_dir,
        labeled_sampler=samplers['labeled'],
        unlabeled_sampler=samplers['unlabeled'],
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        logger=logger,
        ema_model=None
    )
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point: run pseudo-label training when executed directly.
if __name__ == '__main__':
    main()
| 9,821
| 42.460177
| 149
|
py
|
3DTrans
|
3DTrans-master/tools/train_multi_db_3db.py
|
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader_mdf, build_dataloader
from pcdet.models import build_network_multi_db_3, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from tools.train_utils.train_multi_db_utils_3cls import train_model
def parse_config():
    """Parse CLI flags for 3-dataset multi-db training and populate ``cfg``.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global config
        (TAG and EXP_GROUP_PATH derived from the cfg-file path).
    """
    ap = argparse.ArgumentParser(description='arg parser')
    # experiment / data options
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--frozen_backbone', action='store_true', default=False, help='froze the backbone when training')
    ap.add_argument('--source_one_name', type=str, default="kitti", help='enter the name of the first dataset of merged datasets')
    ap.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    ap.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    ap.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    # distributed training options
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    ap.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    # checkpointing / evaluation options
    ap.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    ap.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = ap.parse_args()
    # Load the YAML config and derive naming metadata from its path.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # remove 'cfgs' and 'xxxx.yaml'
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Train one detector jointly on three source datasets, then evaluate.

    Builds one dataloader per source dataset, wires the dataset-source flag
    into Dual-BN style modules, runs the 3-dataset training loop and finally
    performs repeated checkpoint evaluation on the first dataset's test split.
    """
    args, cfg = parse_config()
    # Only these datasets are supported as the first of the merged sources.
    if args.source_one_name not in ["waymo", "nusc", "kitti"]:
        raise RuntimeError('Does not exist for source_one_name')
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # CLI batch size is global; split it evenly across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the run artifacts.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    logger.info('**********************Using Two DataLoader and Merge Loss**********************')
    logger.info('**********************VALUE of source_one_name= %s**********************' % args.source_one_name)
    source_set, source_loader, source_sampler = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    source_set_2, source_loader_2, source_sampler_2 = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    source_set_3, source_loader_3, source_sampler_3 = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG_SRC_3,
        class_names=cfg.DATA_CONFIG_SRC_3.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # add the dataset_source flag into Dual_BN layer, for training stage, we use the default value of 1
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_3D_MoE', None):
        cfg.MODEL.DENSE_3D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_2D_MoE', None):
        cfg.MODEL.DENSE_2D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": 1})
    model = build_network_multi_db_3(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        num_class_s3=len(cfg.DATA_CONFIG_SRC_3.CLASS_NAMES), dataset=source_set, dataset_s2=source_set_2, dataset_s3=source_set_3, \
        source_one_name=args.source_one_name, source_1=1)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: resume from the newest one in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if args.frozen_backbone:
        # FIX: supply the value for the '%s' placeholder (the original passed
        # no argument, so the raw format string was logged verbatim).
        logger.info('**********************Note that Frozen Backbone: %s**********************' % args.frozen_backbone)
        model.frozen_model(model)
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()],
                                                    find_unused_parameters=True)
        # model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    # FIX: schedule over the longest of all THREE loaders. The original
    # compared only the first two and under-counted iterations whenever the
    # third dataset was the largest.
    max_len_dataset = max(len(source_loader), len(source_loader_2), len(source_loader_3))
    total_iters_each_epoch = max_len_dataset if not args.merge_all_iters_to_one_epoch \
        else max_len_dataset // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    train_func = train_model
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_func(
        model,
        optimizer,
        source_loader,
        source_loader_2,
        source_loader_3,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        ps_label_dir=ps_label_dir,
        source_sampler=source_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        logger=logger,
    )
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point: run multi-db training when executed directly.
if __name__ == '__main__':
    main()
| 12,450
| 44.441606
| 144
|
py
|
3DTrans
|
3DTrans-master/tools/train_active_dual_target.py
|
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_target_utils import train_active_model_dual_tar
from test import repeat_eval_ckpt
import math
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse CLI flags for active domain-adaptation training and load ``cfg``.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global config
        (TAG and EXP_GROUP_PATH derived from the cfg-file path).
    """
    ap = argparse.ArgumentParser(description='arg parser')
    # experiment / data options
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--batch_size', type=int, default=2, required=False, help='batch size for training')
    ap.add_argument('--epochs', type=int, default=15, required=False, help='number of epochs to train for')
    ap.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    # distributed training options
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    ap.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    # checkpointing / evaluation options
    ap.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    ap.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    ap.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = ap.parse_args()
    # Load the YAML config and derive naming metadata from its path.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # remove 'cfgs' and 'xxxx.yaml'
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for active domain-adaptation training (dual target branch).

    Pipeline: parse config -> (optionally) init distributed training ->
    build source / target / active-sample dataloaders -> build detector +
    domain discriminator with their optimizers and LR schedulers ->
    (optionally) resume from checkpoint -> run train_active_model_dual_tar ->
    evaluate the last checkpoints on the source test split.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # Dispatch to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # args.batch_size is the global batch size; split it across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs

    # Output layout: output/<exp_group>/<tag>/<extra_tag>/{ckpt,target_list}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the run outputs (rank 0 only).
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None

    # -----------------------create dataloader & network & optimizer---------------------------
    # Labeled source training data.
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # Active-sampling source loader: Waymo->NuScenes uses a dedicated sample
    # config without an info_path; every other pair samples from DATA_CONFIG.
    if cfg['DATA_CONFIG']['DATASET'] == 'ActiveWaymoDataset' and cfg['DATA_CONFIG_TAR']['DATASET'] == 'ActiveNuScenesDataset':
        source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
            dataset_cfg=cfg.DATA_CONFIG_SRC_SAMPLE,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            info_path=None,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )
    else:
        source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
            dataset_cfg=cfg.DATA_CONFIG,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            info_path=cfg.DATA_CONFIG.FILE_PATH,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()

    # Separate optimizers for the detector and its domain discriminator.
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_list = [optimizer_detector, optimizer_discriminator]

    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: resume from the newest one in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1

    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)

    # total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=len(source_sample_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=len(source_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator]

    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # NOTE(review): args.max_ckpt_save_num and args.annotation_budget are ignored
    # here (hard-coded 50 / cfg.ANNOTATION_BUDGET) — confirm this is intended.
    train_active_model_dual_tar(
        model=model,
        optimizer=optimizer_list,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        source_sample_loader=source_sample_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_list,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        annotation_budget=cfg.ANNOTATION_BUDGET,
        target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
        sample_save_path=target_list_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        # Fixed: pass the sampler (not the DataLoader) for the active-sample split.
        source_sample_sampler=source_sample_sampler,
        lr_warmup_scheduler=None,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): the visible parse_config does not define --num_epochs_to_eval;
    # confirm it is added upstream, otherwise this line raises AttributeError.
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
                           0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 12,316
| 42.83274
| 169
|
py
|
3DTrans
|
3DTrans-master/tools/train_active_source.py
|
import _init_path
import os
import math
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from tools.test import eval_single_ckpt
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_source_utils import train_active_model_source_only
from test import repeat_eval_ckpt
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse command-line arguments and load the YAML config into the global cfg.

    Side effects: populates the module-level `cfg` (via cfg_from_yaml_file and
    optional --set overrides) and derives cfg.TAG / cfg.EXP_GROUP_PATH from the
    config file path.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # NOTE(review): main() falls back to cfg.OPTIMIZATION only when batch_size/epochs
    # are None; with non-None defaults here those fallbacks never trigger — confirm intended.
    parser.add_argument('--batch_size', type=int, default=2, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=15, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for active source-only training.

    Builds source/target dataloaders, the detector plus its domain
    discriminator, optimizers and LR schedulers, optionally resumes from a
    checkpoint, then runs train_active_model_source_only. The post-training
    evaluation section is currently commented out.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # Dispatch to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # NOTE(review): --batch_size defaults to 2 in parse_config, so this None
    # branch never triggers — confirm intended.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # args.batch_size is global; split it evenly across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Output layout: output/<exp_group>/<tag>/<extra_tag>/{ckpt,target_list}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the run outputs (rank 0 only).
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # Labeled source training data.
    source_set, source_loader, source_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # sample_set, sample_loader, sample_sampler = build_dataloader_ada(
    #     dataset_cfg=cfg.DATA_CONFIG_TAR,
    #     class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
    #     batch_size=args.batch_size,
    #     dist=dist_train, workers=args.workers,
    #     logger=logger,
    #     training=True,
    #     info_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
    #     merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
    #     total_epochs=args.epochs
    # )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # Separate optimizers for the detector and its domain discriminator.
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_list = [optimizer_detector, optimizer_discriminator]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: resume from the newest one in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # NOTE(review): 'SOURCE_THRESHOD' (sic) must match the key spelled this way
    # in the YAML config — do not "fix" the spelling here alone.
    total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_active_model_source_only(
        model=model,
        optimizer=optimizer_list,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        sample_loader=None,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_list,
        optim_cfg=cfg.OPTIMIZATION,
        total_iters_each_epoch=total_iters_each_epoch,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        source_budget=cfg.SOURCE_THRESHOD,
        source_file_path=cfg.DATA_CONFIG.FILE_PATH,
        sample_save_path=target_list_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        sample_sampler=None,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # logger.info('**********************Start evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # test_set, test_loader, sampler = build_dataloader_ada(
    #     dataset_cfg=cfg.DATA_CONFIG,
    #     class_names=cfg.CLASS_NAMES,
    #     batch_size=args.batch_size,
    #     dist=dist_train, workers=args.workers, logger=logger, training=False
    # )
    # eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    # eval_output_dir.mkdir(parents=True, exist_ok=True)
    # args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
    #                        0)  # Only evaluate the last args.num_epochs_to_eval epochs
    # args.ckpt = ckpt_dir / 'checkpoint_epoch_%d.pth' % args.epochs
    # eval_single_ckpt(
    #     model.module if dist_train else model,
    #     test_loader, args, eval_output_dir, logger, ckpt_dir,
    #     dist_test=dist_train
    # )
    # logger.info('**********************End evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 11,822
| 42.307692
| 169
|
py
|
3DTrans
|
3DTrans-master/tools/test_multi_db.py
|
import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, build_network_multi_db
from pcdet.utils import common_utils
def parse_config():
    """Parse evaluation command-line arguments and load the YAML config into cfg.

    Side effects: populates the module-level `cfg`, derives cfg.TAG and
    cfg.EXP_GROUP_PATH from the config path, and seeds numpy's global RNG.

    Returns:
        (args, cfg): parsed argparse namespace and populated config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # Which of the two merged datasets to evaluate (selects test_loader below).
    parser.add_argument('--source_1', type=int, default=2, help='if test the source_1 data')
    parser.add_argument('--source_one_name', type=str, default="kitti", help='enter the name of the first dataset of merged datasets')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    # NOTE(review): help-text typo 'distrbuted' — runtime string, left unchanged here.
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    # Fixed seed for reproducible evaluation-time randomness.
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load one checkpoint (args.ckpt) into `model` and evaluate it once.

    Weights are restored on CPU first when dist_test is set, then the model is
    moved to GPU and a single evaluation epoch is run over `test_loader`.
    """
    # Restore the requested weights, then place the model on the GPU.
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()

    # Single evaluation pass; results land in eval_output_dir.
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger,
        dist_test=dist_test,
        result_dir=eval_output_dir,
        save_to_file=args.save_to_file,
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in `ckpt_dir` that has not been evaluated yet.

    Args:
        ckpt_dir: directory containing '*checkpoint_epoch_<E>.pth' files.
        ckpt_record_file: text file with one already-evaluated epoch id per line.
        args: namespace with `start_epoch`; earlier epochs are ignored.

    Returns:
        (epoch_id_str, ckpt_path) for the first unevaluated checkpoint
        (oldest by mtime), or (-1, None) when nothing is pending.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Use a context manager so the record-file handle is always closed
    # (the original left the file object dangling).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]

    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if len(num_list) == 0:
            continue

        epoch_id = num_list[-1]
        # Skip optimizer-state checkpoints (e.g. 'checkpoint_epoch_3_optim.pth').
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt

    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll `ckpt_dir` and evaluate every new checkpoint as it appears.

    Loops forever, waiting up to args.max_waiting_mins between checkpoint
    arrivals; exits once the wait budget is exhausted after at least one
    evaluation. Evaluated epoch ids are appended to a record file so reruns
    skip them. Tensorboard scalars are written on rank 0 only.
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    # Touch the record file so the first read below never fails.
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log (rank 0 only; tb_log is unbound on other ranks but every
    # use below is guarded by the same rank check)
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            # Nothing pending: sleep, accumulate wait time, and bail out once
            # the waiting budget is spent (but only after a first evaluation).
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        # A fresh checkpoint was found: reset the wait budget.
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point for evaluating a multi-dataset (dual-BN) detector.

    Builds test loaders for both merged source datasets, constructs the
    multi-db network, and evaluates either one checkpoint (--ckpt) or all
    checkpoints in the ckpt dir (--eval_all) on the split selected by
    --source_1 (1 -> first dataset, 2 -> second).
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        # Dispatch to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # args.batch_size is global; split it evenly across GPUs.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # Derive the epoch id from digits in the checkpoint filename.
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    # Test loaders for both merged datasets (s1 = DATA_CONFIG, s2 = DATA_CONFIG_SRC_2).
    test_set, test_loader_s1, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    test_set_s2, test_loader_s2, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    # add the dataset_source flag into Dual_BN layer
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": args.source_1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": args.source_1})
    if cfg.MODEL.get('DENSE_3D_MoE', None):
        cfg.MODEL.DENSE_3D_MoE.update({"db_source": args.source_1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": args.source_1})
    if cfg.MODEL.get('DENSE_2D_MoE', None):
        cfg.MODEL.DENSE_2D_MoE.update({"db_source": args.source_1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": args.source_1})
    model = build_network_multi_db(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        dataset=test_set, dataset_s2=test_set_s2, source_one_name=args.source_one_name)
    # NOTE(review): test_loader stays unbound if --source_1 is neither 1 nor 2,
    # which would raise NameError below — confirm the CLI guarantees 1 or 2.
    if args.source_1 == 1:
        logger.info('**********************Testing Dataset=%s**********************' % test_set.dataset_cfg.DATASET)
        test_loader = test_loader_s1
    elif args.source_1 == 2:
        logger.info('**********************Testing Dataset=%s**********************' % test_set_s2.dataset_cfg.DATASET)
        test_loader = test_loader_s2
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
# Script entry point: run evaluation only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 10,103
| 42.74026
| 142
|
py
|
3DTrans
|
3DTrans-master/tools/train_random.py
|
import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils, active_learning_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_random_utils import train_model
from test import repeat_eval_ckpt
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse command-line arguments and load the YAML config into the global cfg.

    Side effects: populates the module-level `cfg` (via cfg_from_yaml_file and
    optional --set overrides) and derives cfg.TAG / cfg.EXP_GROUP_PATH from the
    config file path.

    Returns:
        (args, cfg): parsed argparse namespace and populated config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # NOTE(review): main() falls back to cfg.OPTIMIZATION only when batch_size/epochs
    # are None; with non-None defaults here those fallbacks never trigger — confirm intended.
    parser.add_argument('--batch_size', type=int, default=2, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=15, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=0, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    # NOTE(review): help-text typo 'distrbuted' — runtime string, left unchanged here.
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for active-domain-adaptation training.

    Parses the config, initialises (optionally distributed) training,
    randomly samples annotation budgets from the source and target dataset
    lists, builds one dataloader per domain, and trains the detector via
    ``train_model``. Checkpoints, pseudo-label and target-list artifacts go
    under ``output/<exp_group>/<tag>/<extra_tag>/``.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # Resolves e.g. common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # CLI batch size is the global one; convert to a per-GPU size.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Experiment directory layout: checkpoints, pseudo labels, sampled target lists.
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    target_list_dir = output_dir / 'target_list'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Snapshot the config file next to the run outputs for reproducibility.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    # Only rank 0 writes tensorboard events.
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # Randomly sample frames from both domains according to the configured budgets.
    source_list = active_learning_utils.get_dataset_list(cfg['DATA_CONFIG']['FILE_PATH'], oss=True)
    target_list = active_learning_utils.get_dataset_list(cfg['DATA_CONFIG_TAR']['FILE_PATH'], oss=True)
    sample_source_path, sample_target_path = active_learning_utils.random_sample(source_list, target_list, cfg['SOURCE_BUDGET'], cfg['ANNOTATION_BUDGET'], target_list_dir)
    source_set, source_loader, source_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=sample_source_path,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    target_set, target_loader, target_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=sample_target_path,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # Explicit checkpoint takes priority; NOTE: optimizer state is not restored.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the newest checkpoint in the run directory, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # Scheduler iteration count is driven by the source loader length.
    lr_scheduler, lr_warmup_scheduler_detector = build_scheduler(
        optimizer, total_iters_each_epoch=len(source_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_model(
        model=model,
        optimizer=optimizer,
        train_source_loader=source_loader,
        train_target_loader=target_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False
    )
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # logger.info('**********************Start evaluation %s/%s(%s)**********************' %
    # (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # test_set, test_loader, sampler = build_dataloader(
    # dataset_cfg=cfg.DATA_CONFIG,
    # class_names=cfg.CLASS_NAMES,
    # batch_size=args.batch_size,
    # dist=dist_train, workers=args.workers, logger=logger, training=False
    # )
    # eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    # eval_output_dir.mkdir(parents=True, exist_ok=True)
    # args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
    # 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    # repeat_eval_ckpt(
    # model.module if dist_train else model,
    # test_loader, args, eval_output_dir, logger, ckpt_dir,
    # dist_test=dist_train
    # )
    # logger.info('**********************End evaluation %s/%s(%s)**********************' %
    # (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
| 10,606
| 43.195833
| 171
|
py
|
3DTrans
|
3DTrans-master/tools/test_semi.py
|
import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import copy
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_semi_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
def parse_config():
    """Build the CLI parser, parse ``sys.argv``, and populate the global cfg.

    Returns:
        tuple: the parsed ``argparse.Namespace`` and the populated global
        ``cfg`` object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    # (flags, keyword arguments) pairs, registered in declaration order so
    # that --help output keeps the historical layout.
    option_specs = [
        (('--cfg_file',), dict(type=str, default=None, help='specify the config for training')),
        (('--batch_size',), dict(type=int, default=None, required=False, help='batch size for training')),
        (('--workers',), dict(type=int, default=8, help='number of workers for dataloader')),
        (('--extra_tag',), dict(type=str, default='default', help='extra tag for this experiment')),
        (('--ckpt',), dict(type=str, default=None, help='checkpoint to start from')),
        (('--launcher',), dict(choices=['none', 'pytorch', 'slurm'], default='none')),
        (('--tcp_port',), dict(type=int, default=18888, help='tcp port for distrbuted training')),
        (('--local_rank',), dict(type=int, default=0, help='local rank for distributed training')),
        (('--set',), dict(dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                          help='set extra config keys if needed')),
        (('--max_waiting_mins',), dict(type=int, default=0, help='max waiting minutes')),
        (('--start_epoch',), dict(type=int, default=0, help='')),
        (('--eval_tag',), dict(type=str, default='default', help='eval tag for this experiment')),
        (('--ckpt_dir',), dict(type=str, default=None, help='specify a ckpt directory to be evaluated if needed')),
        (('--save_to_file',), dict(action='store_true', default=False, help='')),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    args = parser.parse_args()
    # Load the YAML file into the shared global cfg object.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    # Fixed seed so any evaluation-time sampling is reproducible.
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Evaluate a single checkpoint.

    Loads ``args.ckpt`` into *model*, moves it to GPU, and runs one
    evaluation epoch, writing results under *eval_output_dir*.

    Args:
        model: detector network to load the checkpoint into.
        test_loader: dataloader over the test split.
        args: parsed CLI args; ``args.ckpt`` and ``args.save_to_file`` are used.
        eval_output_dir: directory that receives evaluation results.
        logger: logger for progress messages.
        epoch_id: epoch identifier recorded with the results.
        dist_test: whether evaluation runs in distributed mode.
    """
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # Route the model dump through the logger instead of a stray debug print,
    # so it lands in the log file like every other message.
    logger.info('Model for evaluation: %s', model)
    # start evaluation
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in *ckpt_dir* not yet evaluated.

    Checkpoints are matched by the ``checkpoint_epoch_<id>.pth`` naming
    pattern and compared against the epoch ids already recorded (one per
    line) in *ckpt_record_file*.

    Args:
        ckpt_dir: directory containing ``*checkpoint_epoch_*.pth`` files.
        ckpt_record_file: text file listing evaluated epoch ids, one per line.
        args: parsed CLI args; only ``args.start_epoch`` is used.

    Returns:
        tuple: ``(epoch_id_str, ckpt_path)`` for the first unevaluated
        checkpoint (oldest by mtime), or ``(-1, None)`` if none remains.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Use a context manager: the original leaked the file handle by calling
    # readlines() on an anonymous open().
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]
    for cur_ckpt in ckpt_list:
        # Raw string and escaped dot: '.pth' with a bare '.' matched any char.
        num_list = re.findall(r'checkpoint_epoch_(.*)\.pth', cur_ckpt)
        if len(num_list) == 0:
            continue
        epoch_id = num_list[-1]
        # Skip optimizer-state checkpoints (e.g. 'checkpoint_epoch_5_optim.pth').
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Evaluate every not-yet-evaluated checkpoint in *ckpt_dir* in mtime order.

    Loops until ``get_no_evaluated_ckpt`` reports no remaining checkpoint.
    Despite the name, this variant does not wait for new checkpoints to
    appear; the loop simply drains the existing backlog.

    Args:
        model: detector network to load each checkpoint into.
        test_loader: dataloader over the test split.
        args: parsed CLI args; ``args.start_epoch`` and ``args.save_to_file`` are used.
        eval_output_dir: root directory for per-epoch result folders.
        logger: logger for progress messages.
        ckpt_dir: directory scanned for ``*checkpoint_epoch_*.pth`` files.
        dist_test: whether evaluation runs in distributed mode.
    """
    # evaluated ckpt record: one epoch id per line; created empty if missing
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log
    # NOTE(review): tb_log is only bound on rank 0; the rank-0-guarded writes
    # below rely on that invariant.
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            break
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point for evaluating semi-supervised (teacher/student) checkpoints.

    Parses the config, builds the semi-supervised dataloaders, constructs
    separate teacher and student networks from deep copies of ``cfg.MODEL``,
    and evaluates all checkpoints found under ``ssl_ckpt/student`` and
    ``ssl_ckpt/teacher`` respectively.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        # Resolves e.g. common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ssl_ckpt_dir = output_dir / 'ssl_ckpt'
    eval_output_dir = output_dir / 'eval'
    # Derive an epoch tag from the checkpoint filename, if one was given.
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
    eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * cfg.OPTIMIZATION.TEST.BATCH_SIZE_PER_GPU))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    # Per-split batch sizes consumed by build_semi_dataloader.
    batch_size = {
        'pretrain': cfg.OPTIMIZATION.PRETRAIN.BATCH_SIZE_PER_GPU,
        'labeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.LD_BATCH_SIZE_PER_GPU,
        'unlabeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.UD_BATCH_SIZE_PER_GPU,
        'test': cfg.OPTIMIZATION.TEST.BATCH_SIZE_PER_GPU,
    }
    # -----------------------create dataloader & network & optimizer---------------------------
    datasets, dataloaders, samplers = build_semi_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=batch_size,
        dist=dist_test,
        root_path=cfg.DATA_CONFIG.DATA_PATH,
        workers=args.workers,
        logger=logger,
    )
    # Deep-copy the model config so teacher and student builds cannot
    # mutate each other's (or the global) configuration.
    MODEL_TEACHER = copy.deepcopy(cfg.MODEL)
    teacher_model = build_network(model_cfg=MODEL_TEACHER, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    MODEL_STUDENT = copy.deepcopy(cfg.MODEL)
    student_model = build_network(model_cfg=MODEL_STUDENT, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    teacher_model.set_model_type('teacher')
    student_model.set_model_type('student')
    with torch.no_grad():
        logger.info('**********************Start evaluation for student model %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        eval_ssl_dir = output_dir / 'eval' / 'eval_all' / 'eval_with_student_model'
        eval_ssl_dir.mkdir(parents=True, exist_ok=True)
        repeat_eval_ckpt(
            model = student_model,
            test_loader = dataloaders['test'],
            args = args,
            eval_output_dir = eval_ssl_dir,
            logger = logger,
            ckpt_dir = ssl_ckpt_dir / 'student',
            dist_test=dist_test
        )
        logger.info('**********************End evaluation for student model %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        logger.info('**********************Start evaluation for teacher model %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        eval_ssl_dir = output_dir / 'eval' / 'eval_all' / 'eval_with_teacher_model'
        eval_ssl_dir.mkdir(parents=True, exist_ok=True)
        # Teacher is evaluated with the 'origin' model type instead of 'teacher'.
        teacher_model.set_model_type('origin')
        repeat_eval_ckpt(
            model = teacher_model,
            test_loader = dataloaders['test'],
            args = args,
            eval_output_dir = eval_ssl_dir,
            logger = logger,
            ckpt_dir = ssl_ckpt_dir / 'teacher',
            dist_test=dist_test
        )
        logger.info('**********************End evaluation for teacher model %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
| 9,442
| 41.15625
| 120
|
py
|
3DTrans
|
3DTrans-master/tools/train_random_target.py
|
import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils, active_learning_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
from test import repeat_eval_ckpt
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse CLI options for random-target training and load the YAML config.

    Returns:
        tuple: the parsed ``argparse.Namespace`` and the populated global
        ``cfg`` object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=2, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=15, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=0, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    # REMAINDER collects every trailing token for ad-hoc config overrides.
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    # Load the YAML file into the shared global cfg object.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for training on a randomly sampled subset of the target domain.

    Parses the config, initialises (optionally distributed) training,
    randomly samples ``cfg['ANNOTATION_BUDGET']`` target frames, builds a
    single target-domain dataloader, and trains the detector via
    ``train_model``.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # Resolves e.g. common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # CLI batch size is the global one; convert to a per-GPU size.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Experiment directory layout: checkpoints, pseudo labels, sampled target lists.
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    target_list_dir = output_dir / 'target_list'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Snapshot the config file next to the run outputs for reproducibility.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    # Only rank 0 writes tensorboard events.
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # Randomly sample target frames according to the configured annotation budget.
    target_list = active_learning_utils.get_dataset_list(cfg['DATA_CONFIG']['FILE_PATH'], oss=True)
    sample_target_path = active_learning_utils.random_sample_target(target_list, cfg['ANNOTATION_BUDGET'], target_list_dir)
    target_set, target_loader, target_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.DATA_CONFIG.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=sample_target_path,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=target_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # Explicit checkpoint takes priority; NOTE: optimizer state is not restored.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the newest checkpoint in the run directory, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    lr_scheduler, lr_warmup_scheduler_detector = build_scheduler(
        optimizer, total_iters_each_epoch=len(target_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_model(
        model=model,
        optimizer=optimizer,
        train_loader=target_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        source_sampler=target_sampler,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False
    )
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # logger.info('**********************Start evaluation %s/%s(%s)**********************' %
    # (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # test_set, test_loader, sampler = build_dataloader(
    # dataset_cfg=cfg.DATA_CONFIG,
    # class_names=cfg.CLASS_NAMES,
    # batch_size=args.batch_size,
    # dist=dist_train, workers=args.workers, logger=logger, training=False
    # )
    # eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    # eval_output_dir.mkdir(parents=True, exist_ok=True)
    # args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
    # 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    # repeat_eval_ckpt(
    # model.module if dist_train else model,
    # test_loader, args, eval_output_dir, logger, ckpt_dir,
    # dist_test=dist_train
    # )
    # logger.info('**********************End evaluation %s/%s(%s)**********************' %
    # (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
| 9,802
| 42.568889
| 169
|
py
|
3DTrans
|
3DTrans-master/tools/train.py
|
print('program started',)
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
def parse_config():
    """Parse CLI options for standard training and load the YAML config.

    Returns:
        tuple: the parsed ``argparse.Namespace`` and the populated global
        ``cfg`` object.
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    add = arg_parser.add_argument  # bind once; options below register in order
    add('--cfg_file', type=str, default=None, help='specify the config for training')
    add('--batch_size', type=int, default=None, required=False, help='batch size for training')
    add('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    add('--workers', type=int, default=8, help='number of workers for dataloader')
    add('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    add('--ckpt', type=str, default=None, help='checkpoint to start from')
    add('--pretrained_model', type=str, default=None, help='pretrained_model')
    add('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    add('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    add('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    add('--fix_random_seed', action='store_true', default=False, help='')
    add('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    add('--local_rank', type=int, default=0, help='local rank for distributed training')
    add('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    add('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    # REMAINDER collects every trailing token for ad-hoc config overrides.
    add('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
        help='set extra config keys if needed')
    add('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    add('--start_epoch', type=int, default=0, help='')
    add('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    add('--save_to_file', action='store_true', default=False, help='')
    parsed = arg_parser.parse_args()
    # Load the YAML file into the shared global cfg object.
    cfg_from_yaml_file(parsed.cfg_file, cfg)
    cfg.TAG = Path(parsed.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(parsed.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if parsed.set_cfgs is not None:
        cfg_from_list(parsed.set_cfgs, cfg)
    return parsed, cfg
def main():
    """Entry point for standard (single-domain) training plus post-hoc evaluation.

    Parses the config, initialises (optionally distributed) training, builds
    the training dataloader and network, trains via ``train_model``, then
    evaluates the last ``args.num_epochs_to_eval`` checkpoints with
    ``repeat_eval_ckpt``.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        # Resolves e.g. common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # CLI batch size is the global one; convert to a per-GPU size.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        # Per-rank offset keeps data augmentation decorrelated across GPUs.
        common_utils.set_random_seed(666 + cfg.LOCAL_RANK)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Snapshot the config file next to the run outputs for reproducibility.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    # Only rank 0 writes tensorboard events.
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    train_set, train_loader, train_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # NOTE(review): n_parameters is computed but never logged or used below.
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # Explicit checkpoint takes priority; optimizer state is restored here.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the newest checkpoint in the run directory, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_model(
        model,
        optimizer,
        train_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        source_sampler=train_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
    )
    if hasattr(train_set, 'use_shared_memory') and train_set.use_shared_memory:
        train_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
| 9,054
| 43.605911
| 125
|
py
|
3DTrans
|
3DTrans-master/tools/test_multi_db_3db.py
|
import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, build_network_multi_db_3
from pcdet.utils import common_utils
def parse_config():
    """Parse command-line arguments and load the YAML config into the global cfg.

    Side effects: populates the module-level ``cfg`` (TAG, EXP_GROUP_PATH and
    any ``--set`` overrides) and seeds numpy for reproducible evaluation.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    p = argparse.ArgumentParser(description='arg parser')
    add = p.add_argument
    add('--cfg_file', type=str, default=None, help='specify the config for training')
    add('--source_1', type=int, default=1, help='if test the source_1 data')
    add('--source_one_name', type=str, default="kitti", help='enter the name of the first dataset of merged datasets')
    add('--batch_size', type=int, default=None, required=False, help='batch size for training')
    add('--workers', type=int, default=8, help='number of workers for dataloader')
    add('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    add('--ckpt', type=str, default=None, help='checkpoint to start from')
    add('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    add('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    add('--local_rank', type=int, default=0, help='local rank for distributed training')
    add('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
        help='set extra config keys if needed')
    add('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    add('--start_epoch', type=int, default=0, help='')
    add('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    add('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    add('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    add('--save_to_file', action='store_true', default=False, help='')

    args = p.parse_args()

    # Load the YAML config into the shared global cfg and derive bookkeeping
    # fields used for the output directory layout.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'

    # Fixed seed so evaluation-time sampling is reproducible.
    np.random.seed(1024)

    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Evaluate one specific checkpoint (``args.ckpt``) on ``test_loader``.

    Restores the checkpoint weights into ``model``, moves it to GPU, and runs
    a single evaluation epoch whose results are written to ``eval_output_dir``.
    """
    # Restore weights from the requested checkpoint file.
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()

    # Run one full pass over the test set.
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger,
        dist_test=dist_test,
        result_dir=eval_output_dir,
        save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in ``ckpt_dir`` that has not been evaluated yet.

    Args:
        ckpt_dir: directory containing ``*checkpoint_epoch_*.pth`` files.
        ckpt_record_file: text file with one already-evaluated epoch id per line.
        args: namespace providing ``start_epoch``; checkpoints from earlier
            epochs are ignored.

    Returns:
        ``(epoch_id, ckpt_path)`` for the first unevaluated checkpoint in file
        mtime order, or ``(-1, None)`` if there is no such checkpoint.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)

    # Fix: close the record file deterministically instead of leaking the
    # handle (the original used open(...).readlines() without closing).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]

    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if len(num_list) == 0:
            continue

        epoch_id = num_list[-1]
        # Skip optimizer-state files such as 'checkpoint_epoch_10_optim.pth'.
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt

    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll ``ckpt_dir`` and evaluate every new checkpoint as it appears.

    A record file of already-evaluated epoch ids makes restarted runs skip
    finished checkpoints. The loop exits after ``args.max_waiting_mins``
    minutes without a new checkpoint, but only once at least one evaluation
    has completed (so it keeps waiting while training is still warming up).
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    # Touch the record file so later reads never fail on a fresh run.
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log
    # NOTE(review): tb_log is created on rank 0 only; every later use is
    # guarded by the same rank check, so other ranks never touch the name.
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            # Give up once idle time exceeds the budget — but never before
            # the first successful evaluation.
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point: build the multi-db model and dataloaders, then evaluate.

    Evaluates either a single checkpoint (``--ckpt``) or, with ``--eval_all``,
    every checkpoint found in the checkpoint directory as training produces
    them. ``--source_1`` selects which of the three source datasets to test.
    """
    args, cfg = parse_config()
    # Single-GPU vs distributed evaluation, selected by the launcher flag.
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    # Per-GPU batch size: fall back to the config value when not given.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # Derive the epoch id from digits in the checkpoint filename so
        # results land under epoch_<id>/<split>/.
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    # One test loader per source dataset of the merged-dataset model.
    test_set, test_loader_s1, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    test_set_s2, test_loader_s2, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    test_set_s3, test_loader_s3, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_SRC_3,
        class_names=cfg.DATA_CONFIG_SRC_3.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    # add the dataset_source flag into Dual_BN layer
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": args.source_1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": args.source_1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": args.source_1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": args.source_1})
    #model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    model = build_network_multi_db_3(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        num_class_s3=len(cfg.DATA_CONFIG_SRC_3.CLASS_NAMES), dataset=test_set, dataset_s2=test_set_s2, dataset_s3=test_set_s3, \
        source_one_name=args.source_one_name, source_1=args.source_1)
    # NOTE(review): if args.source_1 is outside {1, 2, 3}, test_loader is never
    # bound and the calls below raise UnboundLocalError — confirm valid range
    # is enforced by the caller / launch scripts.
    if args.source_1 == 1:
        logger.info('**********************Testing Dataset=%s**********************' % test_set.dataset_cfg.DATASET)
        test_loader = test_loader_s1
    elif args.source_1 == 2:
        logger.info('**********************Testing Dataset=%s**********************' % test_set_s2.dataset_cfg.DATASET)
        test_loader = test_loader_s2
    elif args.source_1 == 3:
        logger.info('**********************Testing Dataset=%s**********************' % test_set_s3.dataset_cfg.DATASET)
        test_loader = test_loader_s3
    # epoch_id is only used in the single-checkpoint branch, where it was
    # computed above (the not-eval_all path).
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
# Script entry point.
if __name__ == '__main__':
    main()
| 10,576
| 43.441176
| 144
|
py
|
3DTrans
|
3DTrans-master/tools/train_active_TQS.py
|
import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_TQS import train_active_model_target
from test import repeat_eval_ckpt
import math
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse CLI arguments and load the YAML config into the global cfg.

    Side effects: populates the module-level ``cfg`` (TAG, EXP_GROUP_PATH and
    any ``--set`` overrides).

    Returns:
        (args, cfg): parsed argparse namespace and populated config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # NOTE(review): default=2 makes the `args.batch_size is None` fallback in
    # main() unreachable — confirm whether default=None was intended (the
    # sibling training scripts use default=None).
    parser.add_argument('--batch_size', type=int, default=2, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=15, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # Fix: main() reads args.num_epochs_to_eval after training; without this
    # argument the script crashed with AttributeError at the evaluation stage.
    parser.add_argument('--num_epochs_to_eval', type=int, default=0,
                        help='number of last epochs to be evaluated after training')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def main():
    """Entry point: active domain adaptation training (TQS) then evaluation.

    Builds source/target/active-sampling dataloaders, trains the detector with
    separate optimizers for the detector, domain discriminator and multi-class
    classifier head, then evaluates the last checkpoints on the source split.
    """
    args, cfg = parse_config()
    # Single-GPU vs distributed training, selected by the launcher flag.
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # NOTE(review): parse_config defaults --batch_size to 2, so this None
    # branch cannot trigger as written — confirm intended default.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Output layout: checkpoints plus the list of actively-selected targets.
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # Source loader restricted by FILE_PATH, used for the active-sampling stage.
    source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=cfg.DATA_CONFIG.FILE_PATH,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # Three optimizers: whole detector, domain discriminator, classifier head.
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_mul_cls = build_optimizer(model.roi_head, cfg.OPTIMIZATION.MUL_CLS) #dense head
    optimizer_list = [optimizer_detector, optimizer_discriminator, optimizer_mul_cls]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # optimizer=None: only model weights are restored, not optimizer state.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Resume from the newest checkpoint in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    # One LR scheduler per optimizer; note the detector is scheduled against
    # the sample loader length, the other two against the source loader.
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=len(source_sample_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=len(source_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_mul_cls, lr_warmup_scheduler_mul_cls = build_scheduler(
        optimizer_mul_cls, total_iters_each_epoch=len(source_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.MUL_CLS
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator, lr_scheduler_mul_cls]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_active_model_target(
        model=model,
        optimizer=optimizer_list,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        sample_loader=source_sample_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_list,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        annotation_budget=cfg.ANNOTATION_BUDGET,
        target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
        sample_save_path=target_list_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        sample_sampler=source_sample_loader,
        lr_warmup_scheduler=None,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    # Release shared-memory blocks the datasets may have pinned.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): requires args.num_epochs_to_eval — ensure parse_config
    # defines it, otherwise this line raises AttributeError.
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
                           0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    main()
| 12,020
| 42.554348
| 169
|
py
|
3DTrans
|
3DTrans-master/tools/train_bi3d_st3d.py
|
import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.active_with_st3d_utils import train_active_with_st3d
from test import repeat_eval_ckpt
import math
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse CLI arguments and load the YAML config into the global cfg.

    Side effects: populates the module-level ``cfg`` (TAG, EXP_GROUP_PATH and
    any ``--set`` overrides).

    Returns:
        (args, cfg): parsed argparse namespace and populated config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # Fix: main() reads args.num_epochs_to_eval after training; without this
    # argument the script crashed with AttributeError at the evaluation stage.
    parser.add_argument('--num_epochs_to_eval', type=int, default=0,
                        help='number of last epochs to be evaluated after training')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def main():
    """Entry point: Bi3D-style active training with ST3D self-training, then eval.

    Builds source/target/active-sampling dataloaders, trains detector and
    domain discriminator with separate optimizers (pseudo labels written under
    ps_label/), then evaluates the last checkpoints on the source split.
    """
    args, cfg = parse_config()
    # Single-GPU vs distributed training, selected by the launcher flag.
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # Per-GPU batch size: fall back to the config value when not given.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Output layout: checkpoints, actively-selected target list, pseudo labels.
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # Start without pseudo labels; the self-training loop enables them later.
    cfg.DATA_CONFIG_TAR.USE_PSEUDO_LABEL = False
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # Source loader restricted by FILE_PATH, used for the active-sampling stage.
    source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=cfg.DATA_CONFIG.FILE_PATH,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # Two optimizers: whole detector and the domain discriminator.
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_list = [optimizer_detector, optimizer_discriminator]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # optimizer=None: only model weights are restored, not optimizer state.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Resume from the newest checkpoint in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # Iterations per epoch driven by the target loader (self-training loop).
    total_iters_each_epoch = len(target_loader) if not args.merge_all_iters_to_one_epoch else len(target_loader) // args.epochs
    # total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_active_with_st3d(
        model=model,
        optimizer=optimizer_list,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        source_sample_loader=source_sample_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_list,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        annotation_budget=cfg.ANNOTATION_BUDGET,
        target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
        sample_save_path=target_list_dir,
        ps_label_dir=ps_label_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        source_sample_sampler=source_sample_loader,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    # Release shared-memory blocks the datasets may have pinned.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): requires args.num_epochs_to_eval — ensure parse_config
    # defines it, otherwise this line raises AttributeError.
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
                           0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    print('start')
    main()
| 11,970
| 42.530909
| 169
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/static_once.py
|
"""Aggregate ONCE train+val 3-D box annotations per class, print
z/length/width/height statistics and export one CSV per class."""
import json
import pickle

import numpy as np
import pandas as pd

VEHICLE_CLASSES = ('Car', 'Truck', 'Bus')
BOX_COLUMNS = ['center_x', 'center_y', 'center_z', 'L', 'W', 'H', 'angle']


def _print_stats(tag, boxes):
    """Print box count plus z / length / width / height distributions."""
    print('%s_num: %d' % (tag, len(boxes)))
    for prefix, col in (('Z--------mean:%f', 2), ('Length---mean: %f', 3),
                        ('Width---mean: %f', 4), ('Height---mean: %f', 5)):
        vals = boxes[:, col]
        print((prefix + ', std: %f, min: %f, max: %f, median: %f') %
              (np.mean(vals), np.std(vals), np.min(vals), np.max(vals), np.median(vals)))


with open('./once_infos_train.pkl', 'rb') as f:
    once_train_info = pickle.load(f)

# Dump one sample frame for manual inspection.
json_str = json.dumps(once_train_info[5])
with open('./example.json', 'w') as f:
    f.write(json_str)

with open('./once_infos_val.pkl', 'rb') as f:
    once_val_info = pickle.load(f)
once_train_info = once_train_info + once_val_info

# Per-frame box chunks keyed by class; 'Veh' merges Car/Truck/Bus.
chunks = {'Car': [], 'Truck': [], 'Bus': [], 'Cyclist': [], 'Pedestrian': [], 'Veh': []}
num = 0
for item in once_train_info:
    annos = item.get('annos')
    if annos is None:
        # Raw (unlabeled) frames carry no 'annos' entry -- skip them.
        continue
    num += 1
    gt_boxes = annos['boxes_3d']
    gt_names = annos['name']
    for cls in ('Car', 'Truck', 'Bus', 'Cyclist', 'Pedestrian'):
        chunks[cls].append(gt_boxes[gt_names == cls])
    chunks['Veh'].append(gt_boxes[np.isin(gt_names, VEHICLE_CLASSES)])
print(num)

# BUG FIX: the original keyed the "first frame" on loop index i == 0, so when
# frame 0 had no annotations every later np.concatenate ran against None and
# was silently swallowed by a bare except, leaving the accumulators None and
# crashing the final statistics prints.
once_car = np.concatenate(chunks['Car'], axis=0)
once_truck = np.concatenate(chunks['Truck'], axis=0)
once_bus = np.concatenate(chunks['Bus'], axis=0)
once_cyc = np.concatenate(chunks['Cyclist'], axis=0)
once_ped = np.concatenate(chunks['Pedestrian'], axis=0)
once_veh = np.concatenate(chunks['Veh'], axis=0)

_print_stats('car', once_car)
_print_stats('truck', once_truck)
_print_stats('bus', once_bus)
_print_stats('ped', once_ped)
_print_stats('cyc', once_cyc)
_print_stats('veh', once_veh)

pd.DataFrame(once_car, columns=BOX_COLUMNS).to_csv('once_car.csv')
pd.DataFrame(once_truck, columns=BOX_COLUMNS).to_csv('once_truck.csv')
pd.DataFrame(once_bus, columns=BOX_COLUMNS).to_csv('once_bus.csv')
pd.DataFrame(once_ped, columns=BOX_COLUMNS).to_csv('once_pedestrian.csv')
pd.DataFrame(once_cyc, columns=BOX_COLUMNS).to_csv('once_cyclist.csv')
| 7,801
| 55.536232
| 202
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/merge_labels.py
|
import pickle
from re import L
from turtle import st
import numpy as np
import argparse
def main(args):
    """Merge per-class pseudo-label pickles (vehicle/cyclist/pedestrian) into
    the raw-data info list and save the merged list to args.save_path.

    Each per-class pickle must be index-aligned with the raw-data pickle
    (same length, same frame order). Frames that end up with no boxes are
    left without an 'annos' entry.
    """
    # BUG FIX: the old `assert args.raw_data_pkl != None` never fired because
    # the CLI default is '' -- an empty path then failed later inside open()
    # with a confusing error. Truthiness catches both None and ''.
    assert args.raw_data_pkl, 'raw_data path cannot be None'
    with open(args.raw_data_pkl, 'rb') as f:
        raw_data_info = pickle.load(f)

    # class name -> loaded per-class detection list (insertion order fixes
    # the Vehicle -> Cyclist -> Pedestrian concatenation order below).
    results = {}
    for cls, path in (('Vehicle', args.vehicle_pkl),
                      ('Cyclist', args.cyclist_pkl),
                      ('Pedestrian', args.pedestrian_pkl)):
        if path:
            with open(path, 'rb') as f:
                results[cls] = pickle.load(f)
            assert len(results[cls]) == len(raw_data_info), \
                '%s file and raw data file are not corresponded' % cls
        else:
            print('++ No %s pseudo info.' % cls.lower())

    box_counts = {cls: 0 for cls in results}
    for i, raw_data in enumerate(raw_data_info):
        gt_names, gt_boxes, gt_scores = [], [], []
        for cls, result in results.items():
            entry = result[i]
            assert entry['frame_id'] == raw_data['frame_id']
            mask = entry['name'] == cls
            box_counts[cls] += int(np.sum(mask != 0))
            gt_names += list(entry['name'][mask])
            gt_boxes += list(entry['boxes_3d'][mask])
            gt_scores += list(entry['score'][mask])
        gt_names = np.array(gt_names)
        gt_boxes = np.array(gt_boxes, dtype=np.float64)
        gt_scores = np.array(gt_scores)
        if gt_names.size == 0:
            # No pseudo boxes for this frame: leave it without 'annos'.
            continue
        annos = {
            'name': gt_names,
            'boxes_3d': gt_boxes,
            'boxes_score': gt_scores
        }
        raw_data.update({'annos': annos})

    # The original accumulated these counts but never reported them.
    print('++ merged boxes per class: %s' % box_counts)
    with open(args.save_path, 'wb') as f:
        pickle.dump(raw_data_info, f)
def parse_config():
    """Build the merge-labels CLI parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='arg parser')
    # Per-class pseudo-label pickles plus the raw-data pickle; all optional.
    for flag in ('--vehicle_pkl', '--cyclist_pkl', '--pedestrian_pkl', '--raw_data_pkl'):
        parser.add_argument(flag, type=str, default='')
    parser.add_argument('--save_path', type=str, default='test_1.pkl')
    return parser.parse_args()
if __name__ == '__main__':
    # CLI entry point: parse options, then merge the pseudo-label pickles
    # into the raw-data info list.
    args = parse_config()
    main(args)
| 4,034
| 35.026786
| 123
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/vis_openmdf.py
|
import os
import boto3
import io
import numpy as np
import argparse
import pickle
import os
import pickle
import open3d_vis_utils as V
from dataset import Dataset
def read_s3_pkl(bucket_name, pkl_path):
    """Fetch and unpickle an object stored on S3.

    NOTE(review): relies on the module-level boto3 `client` created in the
    __main__ guard -- only valid when --bucket_name was given.
    """
    response = client.get_object(Bucket=bucket_name, Key=pkl_path)
    payload = io.BytesIO(response['Body'].read())
    return pickle.load(payload)
def check_annos(info):
    """Return whether this frame info dict carries an 'annos' entry."""
    has_annotations = 'annos' in info
    return has_annotations
def vis_scene(args):
    """Iterate validation frames and render the point cloud, ground-truth
    boxes and (optionally) predicted boxes with Open3D."""
    DATA = Dataset(args)
    if args.val_pkl_path is not None:
        try:
            # Prefer S3 when a bucket is configured; fall back to local disk.
            infos_val = read_s3_pkl(args.bucket_name, args.val_pkl_path)
        except Exception:
            with open(args.val_pkl_path, 'rb') as f:
                infos_val = pickle.load(f)
        if args.dataset_name == 'once' and args.vis_gt:
            # ONCE raw frames carry no annotations; drop them when GT is shown.
            infos_val = list(filter(check_annos, infos_val))
    # BUG FIX: parse_config defines --result_file, but this function read the
    # non-existent args.res_path (AttributeError). Accept either spelling.
    res_path = getattr(args, 'res_path', None) or getattr(args, 'result_file', None)
    if res_path is not None:
        with open(res_path, 'rb') as f:
            pkl_z = pickle.load(f)
    else:
        pkl_z = None
    if not os.path.exists(args.dataset_name):
        os.mkdir(args.dataset_name)
    for idx, info in enumerate(infos_val):
        print(idx)
        if idx < 730:
            # NOTE(review): hard-coded fast-forward past the first 730 frames,
            # presumably left over from debugging -- confirm before removing.
            continue
        pointcloud, gt_boxes = DATA.get_data(args, info)
        if not args.vis_gt:
            gt_boxes = None
        if pkl_z is None or not args.vis_result_box:
            box3d = None
        elif args.dataset_name == 'once':
            # ONCE result pickles store predictions under 'boxes_3d' ...
            box3d = pkl_z[idx]['boxes_3d']
        else:
            # ... other datasets under 'boxes_lidar'.
            box3d = pkl_z[idx]['boxes_lidar']
        V.draw_scenes(points=pointcloud, gt_boxes=gt_boxes, ref_boxes=box3d)
def _str2bool(value):
    """argparse-friendly boolean converter.

    BUG FIX: `type=bool` treats ANY non-empty string (including 'False') as
    True, so `--vis_gt False` silently kept the flag on.
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', '1', 'yes', 'y', 't')


def parse_config():
    """Build the visualization CLI parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--bucket_name', type=str, default=None)
    parser.add_argument('--dataset_name', type=str, default="kitti")  # kitti, waymo, nuscenes, once
    parser.add_argument('--val_pkl_path', type=str, default=None)
    parser.add_argument('--result_file', type=str, default=None)
    # BUG FIX: `type=list` split a single argument string into characters;
    # nargs='+' accepts `--visualize_categories Car Cyclist` as a real list.
    parser.add_argument('--visualize_categories', type=str, nargs='+',
                        default=['Pedestrian', 'Vehicle', 'Cyclist'])
    parser.add_argument('--vis_gt', type=_str2bool, default=True)
    parser.add_argument('--vis_result_box', type=_str2bool, default=False)
    parser.add_argument('--fov', type=_str2bool, default=True)
    parser.add_argument('--data_root', type=str, default=None)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_config()
    if args.bucket_name is not None:
        # Global S3 client consumed by read_s3_pkl; the endpoint_url must be
        # filled in for your storage service.
        # BUG FIX: removed the duplicated `client = client = ...` assignment.
        client = boto3.client(service_name='s3', endpoint_url='')
    vis_scene(args)
| 2,499
| 30.25
| 106
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/open3d_vis_utils.py
|
"""
Open3d visualization tool box
Written by Jihan YANG
All rights preserved from 2021 - present.
"""
import open3d
import torch
import matplotlib
import numpy as np
# Per-label RGB colors (0-1 range) indexed by ref_labels in draw_box().
box_colormap = [
    [1, 1, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 1, 0],
]
def get_coor_colors(obj_labels):
    """
    Args:
        obj_labels: 1 is ground, labels > 1 indicates different instance cluster

    Returns:
        rgb: [N, 3]. color for each point.
    """
    # Pick one XKCD color per label id (0 .. max label inclusive).
    xkcd_values = list(matplotlib.colors.XKCD_COLORS.values())
    palette = xkcd_values[:obj_labels.max() + 1]
    rgba_palette = np.array([matplotlib.colors.to_rgba_array(c) for c in palette])
    # Map every label to its color, then drop the alpha channel.
    per_point = rgba_palette[obj_labels].squeeze()
    return per_point[:, :3]
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_labels=None, ref_scores=None, point_colors=None, draw_origin=True):
    """Render one frame in an Open3D window (blocks until the window closes).

    Args:
        points: (N, >=3) point cloud, torch.Tensor or ndarray.
        gt_boxes: (M, 7) ground-truth boxes, drawn in green.
        ref_boxes: (K, 7) predicted boxes, blue (or colored by ref_labels).
        ref_labels: optional int labels indexing box_colormap.
        ref_scores: optional per-box scores (forwarded but unused by draw_box).
        point_colors: optional (N, 3) RGB per point; default is magenta.
        draw_origin: draw the coordinate frame at the origin.
    """
    if isinstance(points, torch.Tensor):
        points = points.cpu().numpy()
    if isinstance(gt_boxes, torch.Tensor):
        gt_boxes = gt_boxes.cpu().numpy()
    if isinstance(ref_boxes, torch.Tensor):
        ref_boxes = ref_boxes.cpu().numpy()
    vis = open3d.visualization.Visualizer()
    vis.create_window()
    vis.get_render_option().point_size = 1.0
    vis.get_render_option().background_color = np.zeros(3)
    # draw origin
    if draw_origin:
        axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
        vis.add_geometry(axis_pcd)
    pts = open3d.geometry.PointCloud()
    pts.points = open3d.utility.Vector3dVector(points[:, :3])
    vis.add_geometry(pts)
    # all points magenta unless explicit colors were supplied
    if point_colors is None:
        pts.colors = open3d.utility.Vector3dVector(np.repeat(np.array([[1, 0, 1]]), points.shape[0], axis=0)) # (np.zeros((points.shape[0], 3)))
    else:
        pts.colors = open3d.utility.Vector3dVector(point_colors)
    if gt_boxes is not None:
        vis = draw_box(vis, gt_boxes, (0, 1, 0))
    if ref_boxes is not None:
        vis = draw_box(vis, ref_boxes, (0, 0, 1), ref_labels, ref_scores)
    vis.run()
    vis.destroy_window()
def translate_boxes_to_open3d_instance(gt_boxes):
    """Convert one (7,) box [x, y, z, l, w, h, yaw] into an Open3D
    OrientedBoundingBox plus a LineSet wireframe with two extra cross edges.

             4-------- 6
            /|         /|
           5 -------- 3 .
           | |        | |
           . 7 -------- 1
           |/         |/
           2 -------- 0
    """
    centre = gt_boxes[0:3]
    extent = gt_boxes[3:6]
    # tiny epsilon keeps the rotation well-defined when yaw == 0
    yaw_axis_angle = np.array([0, 0, gt_boxes[6] + 1e-10])
    rotation = open3d.geometry.get_rotation_matrix_from_axis_angle(yaw_axis_angle)
    box3d = open3d.geometry.OrientedBoundingBox(centre, rotation, extent)

    line_set = open3d.geometry.LineSet.create_from_oriented_bounding_box(box3d)
    # add the two diagonal edges (1-4, 7-6) missing from the default wireframe
    extra_edges = np.array([[1, 4], [7, 6]])
    all_edges = np.concatenate([np.asarray(line_set.lines), extra_edges], axis=0)
    line_set.lines = open3d.utility.Vector2iVector(all_edges)
    return line_set, box3d
def draw_box(vis, gt_boxes, color=(0, 1, 0), ref_labels=None, score=None):
    """Add a wireframe for every box in gt_boxes to `vis` and return it.

    When ref_labels is given, each box is tinted via box_colormap; otherwise
    the uniform `color` is used. `score` is accepted but currently unused.
    """
    num_boxes = gt_boxes.shape[0]
    for idx in range(num_boxes):
        line_set, _box3d = translate_boxes_to_open3d_instance(gt_boxes[idx])
        box_color = color if ref_labels is None else box_colormap[ref_labels[idx]]
        line_set.paint_uniform_color(box_color)
        vis.add_geometry(line_set)
    return vis
| 3,478
| 28.483051
| 145
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/static_kitti.py
|
"""Aggregate KITTI trainval 3-D box annotations per class, print
z/length/width/height statistics and export one CSV per class."""
import pickle

import numpy as np
import pandas as pd

BOX_COLUMNS = ['center_x', 'center_y', 'center_z', 'L', 'W', 'H', 'angle']


def _print_stats(tag, boxes):
    """Print box count plus z / length / width / height distributions."""
    print('%s_num: %d' % (tag, len(boxes)))
    for prefix, col in (('Z--------mean:%f', 2), ('Length---mean: %f', 3),
                        ('Width---mean: %f', 4), ('Height---mean: %f', 5)):
        vals = boxes[:, col]
        print((prefix + ', std: %f, min: %f, max: %f, median: %f') %
              (np.mean(vals), np.std(vals), np.min(vals), np.max(vals), np.median(vals)))


with open('kitti_infos_trainval.pkl', 'rb') as f:
    kitti_infos = pickle.load(f)

chunks = {'Car': [], 'Pedestrian': [], 'Cyclist': []}
for item in kitti_infos:
    gt_info = item['annos']
    # NOTE(review): assumes gt_boxes_lidar excludes DontCare entries while
    # 'name' still lists them, so masks built over the filtered names line
    # up with the box array -- confirm against the info generator.
    names = gt_info['name'][gt_info['name'] != 'DontCare']
    for cls in chunks:
        chunks[cls].append(gt_info['gt_boxes_lidar'][names == cls])

kitti_car = np.concatenate(chunks['Car'], axis=0)
kitti_ped = np.concatenate(chunks['Pedestrian'], axis=0)
kitti_cyc = np.concatenate(chunks['Cyclist'], axis=0)

_print_stats('car', kitti_car)
_print_stats('ped', kitti_ped)
_print_stats('bicycle', kitti_cyc)

# BUG FIX: car statistics were written to 'kitti_ped.csv' and immediately
# clobbered by the pedestrian frame; each class now gets its own file.
pd.DataFrame(kitti_car, columns=BOX_COLUMNS).to_csv('kitti_car.csv')
pd.DataFrame(kitti_ped, columns=BOX_COLUMNS).to_csv('kitti_ped.csv')
pd.DataFrame(kitti_cyc, columns=BOX_COLUMNS).to_csv('kitti_cyc.csv')
| 3,878
| 68.267857
| 197
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/static_waymo.py
|
"""Aggregate Waymo train 3-D box annotations per class, print
z/length/width/height statistics and export one CSV per class."""
import pickle

import numpy as np
import pandas as pd

BOX_COLUMNS = ['center_x', 'center_y', 'center_z', 'L', 'W', 'H', 'angle']


def _print_stats(tag, boxes):
    """Print box count plus z / length / width / height distributions."""
    print('%s_num: %d' % (tag, len(boxes)))
    for prefix, col in (('Z--------mean:%f', 2), ('Length---mean: %f', 3),
                        ('Width---mean: %f', 4), ('Height---mean: %f', 5)):
        vals = boxes[:, col]
        print((prefix + ', std: %f, min: %f, max: %f, median: %f') %
              (np.mean(vals), np.std(vals), np.min(vals), np.max(vals), np.median(vals)))


with open('waymo_processed_data_v0_5_0_infos_train.pkl', 'rb') as f:
    waymo_info = pickle.load(f)

# Per-frame box chunks for each class of interest.
chunks = {'Vehicle': [], 'Pedestrian': [], 'Cyclist': []}
for item in waymo_info:
    gt_boxes = item['annos']['gt_boxes_lidar']
    gt_names = item['annos']['name']
    for cls in chunks:
        chunks[cls].append(gt_boxes[gt_names == cls])

# BUG FIX: the original accumulated via `if i == 0` plus bare excepts around
# every np.concatenate, silently dropping frames whenever one failed.
waymo_car = np.concatenate(chunks['Vehicle'], axis=0)
waymo_ped = np.concatenate(chunks['Pedestrian'], axis=0)
waymo_cyc = np.concatenate(chunks['Cyclist'], axis=0)

_print_stats('car', waymo_car)
_print_stats('ped', waymo_ped)
_print_stats('cyc', waymo_cyc)

pd.DataFrame(waymo_car, columns=BOX_COLUMNS).to_csv('waymo_vehicle.csv')
pd.DataFrame(waymo_ped, columns=BOX_COLUMNS).to_csv('waymo_pedestrian.csv')
pd.DataFrame(waymo_cyc, columns=BOX_COLUMNS).to_csv('waymo_cyclist.csv')
| 5,325
| 45.719298
| 197
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/dataset.py
|
from ast import arg
# from http.client import _DataType
import os
import matplotlib.pyplot as plt
import boto3
import io
import pickle
import numpy as np
import argparse
import pickle
import os
from collections import defaultdict
import time, copy
import numpy as np
import torch
import open3d as o3d
import open3d
import matplotlib
from open3d import geometry
import pickle
from itertools import groupby
import open3d_vis_utils as V
import calibration_kitti
class Dataset():
    """Frame loader for visualization: fetches point clouds and ground-truth
    boxes for KITTI / nuScenes / Waymo / ONCE, either from local disk or from
    S3 (when args.bucket_name is set)."""

    def __init__(self, args):
        super().__init__()
        self.dataset_name = args.dataset_name
        self.data_root = args.data_root
        if args.bucket_name is not None:
            # endpoint_url intentionally left blank; fill in for your S3 setup.
            self.client = boto3.client(service_name='s3', endpoint_url='')

    @staticmethod
    def _category_mask(names, categories):
        # Boolean keep-list: True where the object's class is requested.
        return [name in categories for name in names]

    def get_data(self, args, info):
        """Return (pointcloud, gt_boxes) for one frame info dict.

        Only boxes whose class is listed in args.visualize_categories are kept.
        """
        if self.dataset_name == "kitti":
            lidar_idx = info['point_cloud']['lidar_idx']
            img_shape = info['image']['image_shape']
            print(lidar_idx)
            pointcloud = self.get_lidar_kitti(args, lidar_idx)[:, :4]
            calib = self.get_calib(args, lidar_idx)
            pts_rect = calib.lidar_to_rect(pointcloud[:, 0:3])
            if args.fov:
                # FOV-only: keep points that project into the camera image.
                pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
                val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
                val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
                val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
                pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
                pointcloud = pointcloud[pts_valid_flag]
            annos = info['annos']
            loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
            gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
            gt_boxes = self.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
            gt_boxes = gt_boxes[self._category_mask(annos['name'], args.visualize_categories), :]
        elif self.dataset_name == "nuscenes":
            pointcloud = self.get_lidar_with_sweeps(args, info)[:, :3]
            keep = self._category_mask(info['gt_names'], args.visualize_categories)
            gt_boxes = info['gt_boxes'][keep, :7]
        elif self.dataset_name == "waymo":
            pc_info = info['point_cloud']
            pointcloud = self.get_lidar_waymo(args, pc_info)[:, :3]
            keep = self._category_mask(info['annos']['name'], args.visualize_categories)
            gt_boxes = info['annos']['gt_boxes_lidar'][keep, :7]
        elif self.dataset_name == "once":
            frame_id = info['frame_id']
            sequence_id = info['sequence_id']
            pointcloud = self.get_lidar_once(args, sequence_id, frame_id)
            keep = self._category_mask(info['annos']['name'], args.visualize_categories)
            gt_boxes = info['annos']['boxes_3d'][keep, :]
        return pointcloud, gt_boxes

    def get_lidar_once(self, args, seq_id, frame_id):
        """Load one ONCE roof-lidar scan as an (N, 4) float32 array."""
        if args.bucket_name is not None:
            bin_path = os.path.join("dataset/once/data", seq_id, 'lidar_roof', '{}.bin'.format(frame_id))
            obj = self.client.get_object(Bucket=args.bucket_name, Key=bin_path)
            # .copy(): frombuffer returns a read-only view of the S3 payload
            points = np.frombuffer(io.BytesIO(obj['Body'].read()).read(), dtype=np.float32).reshape(-1, 4).copy()
        else:
            bin_path = os.path.join(self.data_root, seq_id, 'lidar_roof', '{}.bin'.format(frame_id))
            points = np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4)
        return points

    def get_lidar_kitti(self, args, idx):
        """Load one KITTI velodyne scan as an (N, 4) float32 array."""
        if args.bucket_name is not None:
            lidar_file = os.path.join("dataset", args.dataset_name, "training", 'velodyne', '%s.bin' % idx)
            obj = self.client.get_object(Bucket=args.bucket_name, Key=lidar_file)
            lidar_points = np.frombuffer(io.BytesIO(obj['Body'].read()).read(), dtype=np.float32).reshape(-1, 4).copy()
        else:
            lidar_file = os.path.join(self.data_root, 'training/velodyne', '%s.bin' % idx)
            lidar_points = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
        return lidar_points

    def get_sweep(self, args, sweep_info):
        """Load one nuScenes sweep, strip ego points, transform into the
        key-frame coordinate system and return (points (N,4), time_lags (N,1))."""
        def remove_ego_points(points, center_radius=1.0):
            # drop returns from the ego vehicle itself
            mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
            return points[mask]

        if args.bucket_name is not None:
            lidar_path = os.path.join("", sweep_info['lidar_path'])
            obj = self.client.get_object(Bucket=args.bucket_name, Key=lidar_path)
            # BUG FIX: dtype was missing here, so frombuffer defaulted to
            # float64 and mis-read the float32 .bin payload.
            points_sweep = np.frombuffer(io.BytesIO(obj['Body'].read()).read(),
                                         dtype=np.float32, count=-1).reshape([-1, 5])[:, :4].copy()
        else:
            lidar_path = os.path.join(self.data_root, sweep_info['lidar_path'])
            points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
        points_sweep = remove_ego_points(points_sweep).T
        if sweep_info['transform_matrix'] is not None:
            num_points = points_sweep.shape[1]
            points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
                np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
        cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
        return points_sweep.T, cur_times.T

    def get_lidar_with_sweeps(self, args, info, max_sweeps=1):
        """Load the nuScenes key-frame cloud plus (max_sweeps - 1) random
        sweeps, appending a per-point time-lag channel.

        max_sweeps defaults to 1, matching the original hard-coded behavior
        of loading no extra sweeps.
        """
        if args.bucket_name is not None:
            lidar_path = os.path.join("dataset/nuScenes", info['lidar_path'])
            obj = self.client.get_object(Bucket=args.bucket_name, Key=lidar_path)
            points_pre = np.frombuffer(io.BytesIO(obj['Body'].read()).read(), dtype=np.float32, count=-1).reshape([-1, 5]).copy()
            points = points_pre[:, :4]
        else:
            lidar_path = os.path.join(self.data_root, info['lidar_path'])
            points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
        sweep_points_list = [points]
        sweep_times_list = [np.zeros((points.shape[0], 1))]
        for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
            # BUG FIX: get_sweep requires args as its first argument; the old
            # call self.get_sweep(info['sweeps'][k]) raised TypeError.
            points_sweep, times_sweep = self.get_sweep(args, info['sweeps'][k])
            sweep_points_list.append(points_sweep)
            sweep_times_list.append(times_sweep)
        points = np.concatenate(sweep_points_list, axis=0)
        times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
        points = np.concatenate((points, times), axis=1)
        return points

    def get_lidar_waymo(self, args, pc_info):
        """Load one Waymo frame, drop no-label-zone points and tanh-squash
        the intensity channel. Returns an (N, 5) array."""
        sequence_name = pc_info['lidar_sequence']
        sample_idx = pc_info['sample_idx']
        if args.bucket_name is not None:
            lidar_file = os.path.join("dataset/waymo_0.5.0/waymo_processed_data_v0_5_0", sequence_name, ('%04d.npy' % sample_idx))
            obj = self.client.get_object(Bucket=args.bucket_name, Key=lidar_file)
            lidar_points = np.load(io.BytesIO(obj['Body'].read())).copy()
        else:
            lidar_file = os.path.join(self.data_root, sequence_name, ('%04d.npy' % sample_idx))
            lidar_points = np.load(lidar_file)
        points_all, NLZ_flag = lidar_points[:, 0:5], lidar_points[:, 5]
        points_all = points_all[NLZ_flag == -1]
        points_all[:, 3] = np.tanh(points_all[:, 3])
        return points_all

    def boxes3d_kitti_camera_to_lidar(self, boxes3d_camera, calib):
        """
        Args:
            boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
            calib:

        Returns:
            boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
        """
        boxes3d_camera_copy = copy.deepcopy(boxes3d_camera)
        xyz_camera, r = boxes3d_camera_copy[:, 0:3], boxes3d_camera_copy[:, 6:7]
        l, h, w = boxes3d_camera_copy[:, 3:4], boxes3d_camera_copy[:, 4:5], boxes3d_camera_copy[:, 5:6]
        xyz_lidar = calib.rect_to_lidar(xyz_camera)
        # camera boxes are anchored at the bottom face; shift up to the center
        xyz_lidar[:, 2] += h[:, 0] / 2
        return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)

    def get_calib(self, args, idx):
        """Load the KITTI calibration object for one frame index."""
        if args.bucket_name is not None:
            calib_file = os.path.join("dataset", args.dataset_name, "training", "calib", ('%s.txt' % idx))
            text_bytes = self.client.get_object(Bucket=args.bucket_name, Key=calib_file)
            text_bytes = text_bytes['Body'].read().decode('utf-8')
            calibrated_res = calibration_kitti.Calibration(io.StringIO(text_bytes), True)
        else:
            calib_file = os.path.join(self.data_root, 'calib', ('%s.txt' % idx))
            calibrated_res = calibration_kitti.Calibration(calib_file, False)
        return calibrated_res
| 9,518
| 43.274419
| 131
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/getlist.py
|
import os
from os.path import basename
def file_extension(path):
    """Return the extension of *path*, including the leading dot ('' if none)."""
    _, ext = os.path.splitext(path)
    return ext
def file_name(path):
    """Return *path* with its extension stripped."""
    stem, _ = os.path.splitext(path)
    return stem
# Path to the directory holding the .pcd files; fill in before running.
# BUG FIX: `root = #PATH_TO_DATASET` was a syntax error (no value assigned).
root = ''  # PATH_TO_DATASET

entries = sorted(os.listdir(root))

# BUG FIX: the original re-opened `root` (a directory) with open(root, 'w'),
# which raises IsADirectoryError; write the index to a sibling text file.
list_file = root.rstrip('/') + '_list.txt'
count = 0
with open(list_file, 'w') as out:
    for entry in entries:
        subpath = root + '/' + entry
        print(file_extension(subpath))
        if file_extension(subpath) == ".pcd":
            stem = file_name(entry)
            print(stem)
            out.write(stem + '\n')
            count += 1
print(count)
| 744
| 18.605263
| 51
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/split_kitti_train.py
|
"""Randomly subsample the KITTI train infos to produce a 5% training split.

Other ratios (1/10/25/50/75%) can be produced the same way by taking a
different slice of the shuffled list.
"""
import pickle
import random

# Path to the full kitti_infos_train.pkl; fill in before running.
# (was misleadingly named `nuscenes_info_path_train`)
kitti_info_path_train = ""

with open(kitti_info_path_train, 'rb') as f:
    infos_train = pickle.load(f)

# Shuffle so the slice below is a uniform random subset.
random.shuffle(infos_train)
total_len = len(infos_train)

list_05 = infos_train[:int(total_len * 0.05)]
with open('05_kitti_infos_train.pkl', 'wb') as f:
    pickle.dump(list_05, f)
| 1,128
| 24.088889
| 52
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/split_nuscenes_location.py
|
import os
import torch
import pickle
import json

# Cities present in the nuScenes location metadata.
LOCATIONS = (
    'singapore-onenorth',
    'boston-seaport',
    'singapore-queenstown',
    'singapore-hollandvillage',
)


def build_token2location(location_info):
    """Map each log token ('logfile') to the city it was recorded in."""
    return {info['logfile']: info['location'] for info in location_info}


def split_by_location(infos, token2location):
    """Bucket nuScenes infos by recording city.

    The log token is recovered from the CAM_FRONT file name
    (``<token>_...``). Frames whose location is not in LOCATIONS are
    dropped, matching the original if/elif chain.

    Returns:
        dict mapping location -> list of infos.
    """
    buckets = {loc: [] for loc in LOCATIONS}
    for info in infos:
        token = info['cam_front_path'].split('/')[-1].split('_')[0]
        location = token2location[token]
        if location in buckets:
            buckets[location].append(info)
    return buckets


def dump_splits(buckets, suffix):
    """Pickle every bucket to '<location>_data_<suffix>.pkl'."""
    for loc, infos in buckets.items():
        with open('%s_data_%s.pkl' % (loc, suffix), 'wb') as f:
            pickle.dump(infos, f)


if __name__ == '__main__':
    # Fill in before running; the original hard-coded empty paths and ran
    # at import time. The train and val passes were verbatim copies of
    # each other — now factored into split_by_location/dump_splits.
    location_info_path = ""
    nuscenes_info_path_train = ""
    nuscenes_info_path_val = ""
    with open(nuscenes_info_path_train, 'rb') as f:
        infos_train = pickle.load(f)
    with open(nuscenes_info_path_val, 'rb') as f:
        infos_val = pickle.load(f)
    with open(location_info_path, 'rb') as f:
        location_info = json.load(f)
    token2location = build_token2location(location_info)
    train_buckets = split_by_location(infos_train, token2location)
    dump_splits(train_buckets, 'train')
    val_buckets = split_by_location(infos_val, token2location)
    dump_splits(val_buckets, 'val')
    for loc in LOCATIONS:
        key = loc.replace('-', '_')
        print('%s_list_train: %d' % (key, len(train_buckets[loc])))
        print('%s_list_val: %d' % (key, len(val_buckets[loc])))
| 3,656
| 33.828571
| 128
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/calibration_kitti.py
|
import numpy as np
def get_calib_from_file(calib_file, oss_flag):
    """Parse a KITTI calibration file.

    Args:
        calib_file: path to the calib file, or (when ``oss_flag`` is True)
            an already-open text buffer fetched from object storage.
        oss_flag: True when ``calib_file`` is a text buffer, False for a path.

    Returns:
        dict with 'P2', 'P3' (3x4), 'R0' (3x3) and 'Tr_velo2cam' (3x4)
        float32 matrices.
    """
    # The two original branches duplicated the exact same parsing code;
    # only the way the lines are obtained differs.
    if oss_flag:
        lines = calib_file.readlines()
    else:
        with open(calib_file) as f:
            lines = f.readlines()

    def _row(idx):
        # Each calib line is '<name>: v0 v1 ...'; drop the name token.
        return np.array(lines[idx].strip().split(' ')[1:], dtype=np.float32)

    return {'P2': _row(2).reshape(3, 4),
            'P3': _row(3).reshape(3, 4),
            'R0': _row(4).reshape(3, 3),
            'Tr_velo2cam': _row(5).reshape(3, 4)}
class Calibration(object):
    """KITTI sensor calibration.

    Holds the rectified-camera projection matrix (P2/P3), the rectification
    rotation (R0) and the velodyne-to-camera extrinsics (V2C), and converts
    points/boxes between the lidar, rectified-camera and image frames.
    """
    def __init__(self, calib_file, oss_flag):
        # ``calib_file`` may be a path / text buffer, or an already-parsed dict.
        if not isinstance(calib_file, dict):
            calib = get_calib_from_file(calib_file, oss_flag)
        else:
            calib = calib_file
        self.P2 = calib['P2']  # 3 x 4
        self.R0 = calib['R0']  # 3 x 3
        self.V2C = calib['Tr_velo2cam']  # 3 x 4
        # Camera intrinsics and extrinsics
        self.cu = self.P2[0, 2]  # principal point u
        self.cv = self.P2[1, 2]  # principal point v
        self.fu = self.P2[0, 0]  # focal length u
        self.fv = self.P2[1, 1]  # focal length v
        # Translation terms recovered from P2's 4th column (presumably the
        # stereo baseline offset — TODO confirm); consumed by img_to_rect.
        self.tx = self.P2[0, 3] / (-self.fu)
        self.ty = self.P2[1, 3] / (-self.fv)
    def cart_to_hom(self, pts):
        """
        Append a homogeneous 1-column.
        :param pts: (N, 3 or 2)
        :return pts_hom: (N, 4 or 3)
        """
        pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
        return pts_hom
    def rect_to_lidar(self, pts_rect):
        """
        Rectified-camera frame -> lidar frame.
        :param pts_rect: (N, 3)
        :return pts_lidar: (N, 3)
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)  # (N, 4)
        # Build 4x4 homogeneous versions of R0 and V2C, then invert the
        # combined lidar->rect transform (R0 @ V2C).
        R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32)))  # (3, 4)
        R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        R0_ext[3, 3] = 1
        V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        V2C_ext[3, 3] = 1
        pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
        return pts_lidar[:, 0:3]
    def lidar_to_rect(self, pts_lidar):
        """
        Lidar frame -> rectified-camera frame.
        :param pts_lidar: (N, 3)
        :return pts_rect: (N, 3)
        """
        pts_lidar_hom = self.cart_to_hom(pts_lidar)
        pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
        # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
        return pts_rect
    def rect_to_img(self, pts_rect):
        """
        Project rectified-camera points onto the image plane.
        :param pts_rect: (N, 3)
        :return pts_img: (N, 2), pts_rect_depth: (N,)
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)
        pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
        pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T  # (N, 2)
        pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2]  # depth in rect camera coord
        return pts_img, pts_rect_depth
    def lidar_to_img(self, pts_lidar):
        """
        Lidar frame -> image plane (convenience composition).
        :param pts_lidar: (N, 3)
        :return pts_img: (N, 2), pts_depth: (N,)
        """
        pts_rect = self.lidar_to_rect(pts_lidar)
        pts_img, pts_depth = self.rect_to_img(pts_rect)
        return pts_img, pts_depth
    def img_to_rect(self, u, v, depth_rect):
        """
        Back-project pixels with known depth into the rectified frame.
        :param u: (N) pixel column
        :param v: (N) pixel row
        :param depth_rect: (N) depth in the rect camera frame
        :return: pts_rect (N, 3)
        """
        x = ((u - self.cu) * depth_rect) / self.fu + self.tx
        y = ((v - self.cv) * depth_rect) / self.fv + self.ty
        pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
        return pts_rect
    def corners3d_to_img_boxes(self, corners3d):
        """
        Project 3D box corners to 2D boxes on the image.
        :param corners3d: (N, 8, 3) corners in rect coordinate
        :return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
        :return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
        """
        sample_num = corners3d.shape[0]
        corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2)  # (N, 8, 4)
        img_pts = np.matmul(corners3d_hom, self.P2.T)  # (N, 8, 3)
        # Perspective divide, then take the axis-aligned extent of the 8 corners.
        x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
        x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
        x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
        boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
        boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
        return boxes, boxes_corner
| 5,027
| 34.914286
| 116
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/split_nusc_train.py
|
import os
import torch
import pickle
import json
import random
import copy

# Source info pickles (fill in before running).
nuscenes_info_path_train = ""
once_info_path_train = ""
kitti_info = ""

with open(once_info_path_train, 'rb') as f:
    infos_train = pickle.load(f)
# random.shuffle(infos_train)
total_len = len(infos_train)

# Enlarge the training list N-fold (kept for parity; not used below).
N = 10
infos_train_enlarge = copy.deepcopy(infos_train)
for _ in range(1, N):
    infos_train_enlarge.extend(infos_train)

# Dump the leading 1% / 5% / 10% slices of the (unshuffled) infos.
for tag, ratio in (('01', 0.01), ('05', 0.05), ('10', 0.10)):
    subset = infos_train[:int(total_len * ratio)]
    with open('%s_once_infos_train_vehicle.pkl' % tag, 'wb') as f:
        pickle.dump(subset, f)
| 801
| 21.914286
| 56
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/random_selectlist.py
|
import os
from os.path import basename
import random

# VOC class indices, for reference:
# 1 aeroplane, 2 bicycle, 3 bird, 4 boat, 5 bottle, 6 bus, 7 car, 8 cat,
# 9 chair, 10 cow, 11 diningtable, 12 dog, 13 horse, 14 motorbike,
# 15 person, 16 pottedplant, 17 sheep, 18 sofa, 19 train, 20 tvmonitor


def select_random_subset(in_path, out_path, ratio, seed=None):
    """Write a random ``ratio`` fraction of the lines of *in_path* to *out_path*.

    The original script left both file handles unclosed; context managers
    fix that. Returns the number of lines written.
    """
    if seed is not None:
        random.seed(seed)
    with open(in_path) as src:
        lines = src.readlines()
    random.shuffle(lines)
    subset = lines[:int(ratio * len(lines))]
    with open(out_path, 'w') as dst:
        dst.writelines(subset)
    return len(subset)


if __name__ == '__main__':
    select_random_subset('train.txt', 'train_01_random.txt', ratio=0.01)
| 547
| 13.421053
| 42
|
py
|
3DTrans
|
3DTrans-master/tools/tools_utils/static_nusc.py
|
import pickle
import numpy as np
import pandas as pd

with open('nuscenes_infos_10sweeps_train.pkl', 'rb') as f:
    nusc_info = pickle.load(f)


def _stats_tuple(col):
    """(mean, std, min, max, median) of one box-attribute column."""
    return (np.mean(col), np.std(col), np.min(col), np.max(col), np.median(col))


# Gather per-class gt boxes across all frames, concatenating once at the
# end. The original concatenated inside the loop (O(n^2)) and wrapped each
# concatenate in a bare ``except: pass`` that could silently drop data.
_per_class = {'car': [], 'pedestrian': [], 'bicycle': []}
for item in nusc_info:
    gt_boxes = item['gt_boxes']
    gt_names = item['gt_names']
    for cls in _per_class:
        boxes = gt_boxes[gt_names == cls]
        if len(boxes):
            _per_class[cls].append(boxes)

nusc_car = np.concatenate(_per_class['car'], axis=0)
nusc_ped = np.concatenate(_per_class['pedestrian'], axis=0)
nusc_cyc = np.concatenate(_per_class['bicycle'], axis=0)

# Per-class summary statistics; format strings kept identical to the
# original 12 duplicated print lines.
for label, arr in (('car', nusc_car), ('ped', nusc_ped), ('bicycle', nusc_cyc)):
    print('%s_num: %d' % (label, len(arr)))
    print('Z--------mean:%f, std: %f, min: %f, max: %f, median: %f' % _stats_tuple(arr[:, 2]))
    print('Length---mean: %f, std: %f, min: %f, max: %f, median: %f' % _stats_tuple(arr[:, 3]))
    print('Width---mean: %f, std: %f, min: %f, max: %f, median: %f' % _stats_tuple(arr[:, 4]))
    print('Height---mean: %f, std: %f, min: %f, max: %f, median: %f' % _stats_tuple(arr[:, 5]))

_columns = ['center_x', 'center_y', 'center_z', 'L', 'W', 'H', 'angle']
pd.DataFrame(nusc_car[:, 0:7], columns=_columns).to_csv('nuscenes_car.csv')
pd.DataFrame(nusc_ped[:, 0:7], columns=_columns).to_csv('nuscenes_ped.csv')
pd.DataFrame(nusc_cyc[:, 0:7], columns=_columns).to_csv('nuscenes_bicycle.csv')
| 3,878
| 57.772727
| 192
|
py
|
3DTrans
|
3DTrans-master/tools/unsupervised_utils/pointcontrast_utils.py
|
import os
import glob
# from plotly import data
from pcdet.models import load_data_to_gpu
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from torch.nn.utils import clip_grad_norm_
from ssl_utils.semi_utils import random_world_flip, random_world_rotation, random_world_scaling
from pcdet.models.detectors.unsupervised_model.pvrcnn_plus_backbone import HardestContrastiveLoss
# @torch.no_grad()
# def get_positive_pairs(batch_dict_1, batch_dict_2):
# augmentation_functions = {
# 'random_world_flip': random_world_flip,
# 'random_world_rotation': random_world_rotation,
# 'random_world_scaling': random_world_scaling
# }
# for bs_idx in range(len(batch_dict_1)):
# aug_list_1 = batch_dict_1['augmentation_list'][bs_idx]
# aug_list_2 = batch_dict_2['augmentation_list'][bs_idx]
# aug_param_1 = batch_dict_1['augmentation_params'][bs_idx]
# aug_param_2 = batch_dict_2['augmentation_params'][bs_idx]
def pointcontrast(model, batch_dict_1, batch_dict_2, loss_cfg, dist, voxel_size, point_cloud_range):
    """Run two augmented views through the model and return the
    PointContrast hardest-contrastive loss (positive + negative terms)."""
    load_data_to_gpu(batch_dict_1)
    load_data_to_gpu(batch_dict_2)
    if dist:
        # In distributed mode the wrapper consumes both views in one call.
        batch_dict_1, batch_dict_2 = model(batch_dict_1, batch_dict_2)
    else:
        batch_dict_1 = model(batch_dict_1)
        batch_dict_2 = model(batch_dict_2)
    criterion = HardestContrastiveLoss(loss_cfg, voxel_size, point_cloud_range)
    pos_loss, neg_loss = criterion.get_hardest_contrastive_loss(batch_dict_1, batch_dict_2)
    return pos_loss + neg_loss
def train_pointcontrast_one_epoch(model, optimizer, data_loader, lr_scheduler,
                                  voxel_size, point_cloud_range,
                                  accumulated_iter, cfg, rank, tbar, total_it_each_epoch,
                                  dataloader_iter, tb_log=None, leave_pbar=False, dist=False):
    """Train the PointContrast objective for one epoch.

    Returns the updated global iteration counter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    disp_dict = {}
    for cur_epoch in range(total_it_each_epoch):
        try:
            batch_1, batch_2 = next(dataloader_iter)
        except StopIteration:
            # Loader exhausted mid-epoch: restart it and fetch again.
            dataloader_iter = iter(data_loader)
            batch_1, batch_2 = next(dataloader_iter)
            print('new sample dataloader')
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:
            # torch optimizers expose the lr via ``param_groups``, not a
            # ``.lr`` attribute. The original caught StopIteration here
            # (which float() never raises) and read the misspelled
            # ``optimizer.param_group``, so this fallback always crashed.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        optimizer.zero_grad()
        loss = pointcontrast(model, batch_1, batch_2, cfg.LOSS_CFG, dist, voxel_size, point_cloud_range)
        loss.backward()
        clip_grad_norm_(model.parameters(), cfg.GRAD_NORM_CLIP)
        optimizer.step()
        lr_scheduler.step(accumulated_iter)
        accumulated_iter += 1
        disp_dict.update({
            'loss': loss.item(),
            'lr': cur_lr
        })
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader, lr_scheduler, cfg, voxel_size, point_cloud_range,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                train_sampler, lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                merge_all_iters_to_one_epoch=False, dist=False):
    """Full PointContrast training loop over ``[start_epoch, total_epochs)``.

    Each epoch delegates to ``train_pointcontrast_one_epoch`` and, on rank 0,
    periodically checkpoints while pruning the oldest checkpoints so at most
    ``max_ckpt_save_num`` remain on disk.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_loader)  # total iterations set to labeled set
        assert merge_all_iters_to_one_epoch is False
        train_loader_iter = iter(train_loader)
        for cur_epoch in tbar:
            if train_sampler is not None:
                # Re-seed the distributed sampler so shuffling differs per epoch.
                train_sampler.set_epoch(cur_epoch)
            # train one epoch: use the warmup scheduler for the first
            # cfg.WARMUP_EPOCH epochs, then the main scheduler.
            if lr_warmup_scheduler is not None and cur_epoch < cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_pointcontrast_one_epoch(
                model=model,
                optimizer=optimizer,
                data_loader=train_loader,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter,
                point_cloud_range=point_cloud_range,
                voxel_size=voxel_size, cfg=cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dist = dist,
                dataloader_iter=train_loader_iter
            )
            # save trained model (rank 0 only), evicting oldest checkpoints
            # so at most max_ckpt_save_num files remain.
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor moved to the CPU."""
    cpu_state = type(model_state)()
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict (model tensors on CPU)."""
    optim_state = None if optimizer is None else optimizer.state_dict()
    model_state = None
    if model is not None:
        # DDP wraps the real model in ``.module``; unwrap before saving.
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except:
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state,
            'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth'.

    The branch that would split the optimizer state into a separate
    '<filename>_optim.pth' is intentionally disabled (constant False),
    matching the original behaviour.
    """
    split_optimizer_file = False  # deliberately disabled
    if split_optimizer_file and 'optimizer_state' in state:
        optimizer_state = state.pop('optimizer_state', None)
        torch.save({'optimizer_state': optimizer_state}, '{}_optim.pth'.format(filename))
    torch.save(state, '{}.pth'.format(filename))
def update_ema_variables(model, ema_model, alpha, global_step):
    """EMA update of ``ema_model`` parameters from ``model``.

    Uses the true average until the exponential average is more correct:
    the effective momentum ramps up with ``global_step`` and is capped at
    ``alpha``.
    """
    alpha = min(1 - 1 / (global_step + 2), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param, in place.
        # ``add_(scalar, tensor)`` is deprecated; use the ``alpha=`` keyword.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def update_ema_variables_with_fixed_momentum(model, ema_model, alpha):
    """EMA update of ``ema_model`` parameters with a constant momentum ``alpha``."""
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param, in place.
        # ``add_(scalar, tensor)`` is deprecated; use the ``alpha=`` keyword.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
| 8,215
| 39.27451
| 117
|
py
|
3DTrans
|
3DTrans-master/tools/eval_utils/dataset_statistic_check.py
|
import os
import pickle
import io
from pathlib import Path
from petrel_client.client import Client
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io as sk_io
client = Client("~/.petreloss.conf")
def list_oss_dir(oss_path, with_info=False):
    """List objects under *oss_path* on object storage.

    Returns a dict {path: info} when ``with_info`` is set, otherwise a
    plain list of paths.
    """
    iterator = client.get_file_iterator(oss_path)
    if with_info:
        return {path: info for path, info in iterator}
    return [path for path, _ in iterator]
def load_pkl_oss(oss_path):
    """Fetch *oss_path* from object storage and unpickle its contents."""
    raw_bytes = client.get(oss_path)
    return pickle.load(io.BytesIO(raw_bytes))
def splite_bbox(list_bbox):
    """Split boxes into per-dimension numpy arrays.

    Boxes are [x, y, z, l, w, h, ...] for both KITTI and Waymo; only the
    z / l / w / h columns are extracted (mainly for z statistics).

    Returns:
        (z, l, h, w) — note the h/w return order is swapped relative to
        storage order, matching the caller's ["z", "l", "h", "w"] labels.
    """
    z_vals = np.array([box[2] for box in list_bbox])
    l_vals = np.array([box[3] for box in list_bbox])
    w_vals = np.array([box[4] for box in list_bbox])
    h_vals = np.array([box[5] for box in list_bbox])
    return z_vals, l_vals, h_vals, w_vals
def process_object_info(class_info, cls_name=None, get_abnorm_idx=False):
    """Compute per-dimension statistics for one object class.

    Returns a dict keyed by "z"/"l"/"h"/"w" whose values are the stats
    dicts produced by ``get_statistic`` (optionally including abnormal
    object indices).
    """
    dim_names = ["z", "l", "h", "w"]
    stats = {}
    for dim_idx, values in enumerate(splite_bbox(class_info)):
        if cls_name is not None:
            print(f"Process the class: {cls_name}")
            print(f"Current Process the Information along: {dim_names[dim_idx]}")
        stats[dim_names[dim_idx]] = get_statistic(values, get_abnorm=get_abnorm_idx)
        # draw_hist(values)
    return stats
def get_statistic(arr, get_abnorm=False):
    """Print and return rounded (2 d.p.) summary statistics of *arr*.

    When ``get_abnorm`` is set, also record indices of values lying
    outside mean +/- 3*std under "abnorm_obj_idx" as
    [indices_above, indices_below] (each a np.where index array).
    """
    summary = {
        "mean": np.round(np.mean(arr), decimals=2),
        "std": np.round(np.std(arr), decimals=2),
        "min": np.round(np.min(arr), decimals=2),
        "max": np.round(np.max(arr), decimals=2),
        "median": np.round(np.median(arr), decimals=2),
    }
    print(f"mean: {summary['mean']}, std: {summary['std']}, min: {summary['min']}, max {summary['max']}, median {summary['median']}")
    if get_abnorm:
        upper = summary["mean"] + 3 * summary["std"]
        lower = summary["mean"] - 3 * summary["std"]
        summary["abnorm_obj_idx"] = list(np.where(arr > upper)) + list(np.where(arr < lower))
    return summary
def get_image(root_data_path, idx):
    """
    Loads image for a sample.
    Args:
        root_data_path: dataset root containing the 'image_2' folder
        idx: Sample index (int or str)
    Returns:
        image: (H, W, 3), RGB Image as float32 scaled to [0, 1]
    """
    # Bug fix: the original concatenated root + 'image_2' + '%s.png' with no
    # path separators, producing e.g. 'rootimage_2000001.png' and failing to
    # load anything.
    img_file = os.path.join(root_data_path, 'image_2', '%s.png' % idx)
    print(f"Try to load image: {img_file}")
    image = sk_io.imread(img_file)
    image = image.astype(np.float32)
    image /= 255.0
    return image
def draw_hist(a, num_bins=20):
    """Show a density-normalized histogram of *a* with ``num_bins`` bins."""
    plt.figure(figsize=(20,8),dpi=80)
    plt.hist(a,num_bins,density=True)
    plt.grid(alpha=0.1)
    plt.show()
def add_rect_to_image(car_image_loc, abnorm_flag=False):
    """Build a matplotlib Rectangle patch for a [x1, y1, x2, y2] image box.

    Abnormal boxes are drawn thicker in red; normal boxes thin in green.
    """
    x1, y1, x2, y2 = car_image_loc[0], car_image_loc[1], car_image_loc[2], car_image_loc[3]
    line_width, edge_color = (2, 'r') if abnorm_flag else (1, 'g')
    return patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                             linewidth=line_width, edgecolor=edge_color,
                             facecolor='none')
def kitti_process(abnorm_info_types=["z"]):
    """Audit KITTI annotations for statistically abnormal Car boxes.

    For each dimension listed in ``abnorm_info_types`` ("z"/"l"/"h"/"w"),
    finds boxes outside mean +/- 3*std and saves the corresponding camera
    images with the abnormal box drawn in red (all other Car boxes in
    green, labelled with the dimension value) for manual inspection.

    NOTE(review): ``kitti_path``, ``kitti_image_save_path`` and
    ``ori_image_path`` are unfilled placeholders — the ``= #PATH...``
    lines are syntax errors until real paths are substituted.
    """
    kitti_path = #PATH TO DATASET
    # Column index of each attribute inside a gt_boxes_lidar row.
    bbox_info_pointer = {"x":0, "y":1, "z":2, "l":3, "w":4, "h":5}
    kitti_infos = load_pkl_oss(kitti_path)
    kitti_classes = ['Car','Pedestrian', 'Cyclist']
    kitti_car_info = []
    kitti_car_info_image = []
    kitti_car_frameIdx = []
    kitti_car_info_index = []
    kitti_ped_info = []
    kitti_cyc_info = []
    kitti_idx_list = []
    kitti_frame_car_counter = []
    kitti_info_class = {}
    frame_cnt = len(kitti_infos)
    # Flatten per-frame annotations into parallel per-object lists; for
    # cars we additionally remember the frame and info indices so an
    # abnormal object can be traced back to its image.
    for idx, info in enumerate(kitti_infos):
        lidar_idx = info["point_cloud"]["lidar_idx"]
        kitti_idx_list.append(lidar_idx)
        anno_info = info["annos"]
        obj_number = anno_info["name"].shape[0]
        car_counter = 0
        for i in range(obj_number):
            if anno_info["name"][i] == "Pedestrian":
                kitti_ped_info.append(anno_info["gt_boxes_lidar"][i])
            elif anno_info["name"][i] == "Car":
                car_counter += 1
                kitti_car_info.append(anno_info["gt_boxes_lidar"][i])
                kitti_car_info_image.append(anno_info['bbox'][i])
            elif anno_info["name"][i] == "Cyclist":
                kitti_cyc_info.append(anno_info["gt_boxes_lidar"][i])
            else:
                continue
        # used to fetch image/lidar files
        kitti_car_frameIdx.extend([lidar_idx] * car_counter)
        # uesd to fetch frame info
        kitti_car_info_index.extend([idx] * car_counter)
        kitti_frame_car_counter.extend([car_counter] *car_counter)
    kitti_info_class = {"car": kitti_car_info, "ped": kitti_ped_info, "cyc": kitti_cyc_info, "car_frameIdx":kitti_car_frameIdx}
    print(f"The totoal frame cout: {frame_cnt}")
    print(f"Car Counts: {len(kitti_car_info)}, Ped: {len(kitti_ped_info)}, Cyc: {len(kitti_cyc_info)}")
    assert len(kitti_car_info_index) == len(kitti_car_frameIdx) == len(kitti_frame_car_counter)
    # Only the Car class is actually audited below.
    for cls in kitti_info_class.keys():
        cls_info = kitti_info_class[cls]
        if cls != "car":
            continue
        print(f"Current Process {cls}")
        statis = process_object_info(cls_info, cls_name=cls, get_abnorm_idx=True)
        # abnorm_obj_idx holds [indices_above, indices_below].
        max_min_order = {"max": 0, "min": 1}
        for abnorm_info_type in abnorm_info_types:
            statis_info = statis[abnorm_info_type]
            kitti_image_save_path = ""
            for limit in max_min_order.keys():
                cur_save_path = os.path.join(kitti_image_save_path, abnorm_info_type, limit)
                if not os.path.exists(cur_save_path):
                    os.makedirs(cur_save_path)
                    print(f"Make folder: {cur_save_path}")
                car_info_image_abnorm = [kitti_car_info_image[k] for k in statis_info['abnorm_obj_idx'][max_min_order[limit]]]
                for idx, abnorm_idx in enumerate(statis_info['abnorm_obj_idx'][max_min_order[limit]]):
                    # 0: too large, 1: too small
                    # abnorm_idx = 10
                    abnorm_frame_idx = kitti_car_frameIdx[abnorm_idx]
                    abnorm_info_idx = kitti_car_info_index[abnorm_idx]
                    car_image_loc = car_info_image_abnorm[idx]
                    ori_image_path = #PATH_TO_DATASET
                    print(f"Load image from {ori_image_path}")
                    image_name = kitti_infos[abnorm_info_idx]["point_cloud"]["lidar_idx"] + ".png"
                    full_path = os.path.join(cur_save_path, image_name)
                    # if os.path.exists(full_path):
                    #     continue
                    image_bytes = client.get(ori_image_path)
                    image_npy = sk_io.imread(io.BytesIO(image_bytes))
                    plt.figure(figsize=(30, 15))
                    fig, ax = plt.subplots()
                    # print(f"Car loc is: {car_image_loc}" )
                    # The abnormal box is drawn highlighted (red, thick).
                    rect = add_rect_to_image(car_image_loc, abnorm_flag=True)
                    ax.add_patch(rect)
                    all_car_images = kitti_infos[abnorm_info_idx]["annos"]["bbox"]
                    all_car_lidar = kitti_infos[abnorm_info_idx]["annos"]["gt_boxes_lidar"]
                    car_counter = 0
                    for type_ in kitti_infos[abnorm_info_idx]["annos"]["name"]:
                        if type_ == 'Car':
                            car_counter += 1
                    # Draw every Car box in the frame (green) labelled with
                    # the value of the audited dimension.
                    for i in range(car_counter):
                        car_loc = all_car_images[i]
                        # print(f"Car loc in all bbox is: {car_loc}" )
                        rect_ = add_rect_to_image(car_loc, abnorm_flag=False)
                        plt.text(car_loc[0], car_loc[1], str(round(all_car_lidar[i][bbox_info_pointer[abnorm_info_type]],2)))
                        ax.add_patch(rect_)
                        # break
                    ax.imshow(image_npy)
                    # ax.add_image(image_npy)
                    # plt.show()
                    image_name = kitti_infos[abnorm_info_idx]["point_cloud"]["lidar_idx"] + ".png"
                    full_path = os.path.join(cur_save_path, image_name)
                    print(f"save abnorm statistic image to {full_path}")
                    # plt.close(fig)
                    plt.savefig(full_path)
# Entry point: audit all four box dimensions (z, l, h, w) for outliers.
if __name__ == "__main__":
    kitti_process(abnorm_info_types=["z", "l", "h", "w"])
| 8,936
| 37.356223
| 127
|
py
|
3DTrans
|
3DTrans-master/tools/eval_utils/eval_utils.py
|
import pickle
import time
import numpy as np
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """Accumulate per-batch recall counters into *metric* and update the
    progress-bar display dict with the recall at the lowest threshold."""
    thresh_list = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST
    for thresh in thresh_list:
        metric['recall_roi_%s' % str(thresh)] += ret_dict.get('roi_%s' % str(thresh), 0)
        metric['recall_rcnn_%s' % str(thresh)] += ret_dict.get('rcnn_%s' % str(thresh), 0)
    metric['gt_num'] += ret_dict.get('gt', 0)
    min_thresh = thresh_list[0]
    disp_dict['recall_%s' % str(min_thresh)] = '(%d, %d) / %d' % (
        metric['recall_roi_%s' % str(min_thresh)],
        metric['recall_rcnn_%s' % str(min_thresh)],
        metric['gt_num'],
    )
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Evaluate ``model`` on ``dataloader`` for one epoch.

    Collects per-batch detections and recall counters, merges them across
    ranks when ``dist_test`` is set, pickles the raw detections to
    ``result_dir/result.pkl`` and runs the dataset's official evaluation.

    Returns:
        dict of recall and evaluation metrics on rank 0; ``{}`` on other ranks.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    metric = {
        'gt_num': 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[local_rank],
                broadcast_buffers=False
        )
    model.eval()
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    # Inference loop: accumulate detections and recall statistics.
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_output_dir if save_to_file else None
        )
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        # Gather detections and metric dicts from every rank.
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    if dist_test:
        # Sum the per-rank counters into metric[0].
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    # Dataset-specific official evaluation (e.g. KITTI AP).
    result_str, result_dict = dataset.evaluation(
        det_annos, class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir
    )
    logger.info(result_str)
    ret_dict.update(result_dict)
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
def eval_one_epoch_parallel(cfg, model, show_db, dataloader_s1, dataloader_s2, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Evaluate one epoch of a model that consumes merged two-source batches.

    Batches from the two dataloaders are merged pairwise into a single
    batch dict; ``show_db`` selects which source's dataset is actually
    evaluated (metrics, recall, prediction annos):
    1 -> ``dataloader_s1``'s dataset, 2 -> ``dataloader_s2``'s dataset.

    Args:
        cfg: Global config; uses MODEL.POST_PROCESSING.* and LOCAL_RANK.
        model: Returns a 4-tuple (pred_s1, ret_s1, pred_s2, ret_s2).
        show_db: 1 or 2.
            NOTE(review): any other value leaves ``dataset``/``det_annos``
            undefined (NameError later) — confirm callers only pass 1 or 2.
        dataloader_s1: Dataloader for source 1.
        dataloader_s2: Dataloader for source 2.
        epoch_id: Identifier used for logging only.
        logger: Logger instance.
        dist_test: Wrap the model in DistributedDataParallel and merge
            per-rank results when True.
        save_to_file: Also dump per-frame predictions under
            ``result_dir/final_result/data`` when True.
        result_dir: pathlib.Path root for results and the dist tmpdir.

    Returns:
        dict: recall metrics plus dataset-specific evaluation results on
        rank 0; an empty dict on every other rank.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    # Recall counters, accumulated per-batch by statistics_info().
    metric = {
        'gt_num': 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    # Pick which source's dataset provides class names / anno generation.
    if show_db == 1:
        dataset = dataloader_s1.dataset
        class_names = dataset.class_names
        det_annos = []
    elif show_db == 2:
        dataset = dataloader_s2.dataset
        class_names = dataset.class_names
        det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[local_rank],
                broadcast_buffers=False
        )
    model.eval()
    # Progress bar only on rank 0; its length tracks the "shown" loader.
    if cfg.LOCAL_RANK == 0:
        if show_db == 1:
            progress_bar = tqdm.tqdm(total=len(dataloader_s1), leave=True, desc='eval', dynamic_ncols=True)
        elif show_db == 2:
            progress_bar = tqdm.tqdm(total=len(dataloader_s2), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    if show_db == 1:
        # Iterate source 1; cycle source 2 to always have a partner batch.
        dataloader_iter_2 = iter(dataloader_s2)
        for i, batch_1 in enumerate(dataloader_s1):
            try:
                batch_2 = next(dataloader_iter_2)
            except StopIteration:
                # Source 2 exhausted: restart it (it may be shorter).
                dataloader_iter_2 = iter(dataloader_s2)
                batch_2 = next(dataloader_iter_2)
            batch_dict = common_utils.merge_two_batch_dict(batch_1, batch_2)
            load_data_to_gpu(batch_dict)
            with torch.no_grad():
                # First half of the model output belongs to source 1.
                pred_dicts, ret_dict, _, _ = model(batch_dict)
            disp_dict = {}
            statistics_info(cfg, ret_dict, metric, disp_dict)
            annos = dataset.generate_prediction_dicts(
                batch_dict, pred_dicts, class_names,
                output_path=final_output_dir if save_to_file else None
            )
            det_annos += annos
            if cfg.LOCAL_RANK == 0:
                progress_bar.set_postfix(disp_dict)
                progress_bar.update()
    elif show_db == 2:
        # Mirror case: iterate source 2; cycle source 1.
        dataloader_iter_1 = iter(dataloader_s1)
        for i, batch_2 in enumerate(dataloader_s2):
            try:
                batch_1 = next(dataloader_iter_1)
            except StopIteration:
                dataloader_iter_1 = iter(dataloader_s1)
                batch_1 = next(dataloader_iter_1)
            batch_dict = common_utils.merge_two_batch_dict(batch_1, batch_2)
            load_data_to_gpu(batch_dict)
            with torch.no_grad():
                # Second half of the model output belongs to source 2.
                _, _, pred_dicts, ret_dict = model(batch_dict)
            disp_dict = {}
            statistics_info(cfg, ret_dict, metric, disp_dict)
            annos = dataset.generate_prediction_dicts(
                batch_dict, pred_dicts, class_names,
                output_path=final_output_dir if save_to_file else None
            )
            det_annos += annos
            if cfg.LOCAL_RANK == 0:
                progress_bar.set_postfix(disp_dict)
                progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        # Gather per-rank annos and metric dicts onto every rank.
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    # Only rank 0 computes and reports the final metrics.
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    if dist_test:
        # Sum the per-rank counters into metric[0].
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    # Dataset-specific evaluation (e.g. KITTI/Waymo AP computation).
    result_str, result_dict = dataset.evaluation(
        det_annos, class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir
    )
    logger.info(result_str)
    ret_dict.update(result_dict)
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
if __name__ == '__main__':
    # This module only provides evaluation helpers; nothing runs standalone.
    pass
| 10,327
| 37.251852
| 151
|
py
|
3DTrans
|
3DTrans-master/tools/show_squence_demo/demo.py
|
import os
import copy
import pickle
from collections import defaultdict
import json
import numpy as np
from pathlib import Path
import argparse
import torch
from utils import Visualizer, LabelLUT
from utils.base_dataset import DataCollect
from pcdet.ops.roiaware_pool3d.roiaware_pool3d_utils import points_in_boxes_gpu
def sequence_visualize3d(**infos):
    """Visualize a sequence of frames (points + boxes) in the 3D viewer.

    Args:
        **infos: Keyword data as produced by load_once()/load_nuscenes():
            'idx_names' (frame ids), 'pts' (point arrays), 'pts_label'
            (per-point box indices) and box sources such as 'gt'.
    """
    data_collect = DataCollect(color_attr=[
        "class",
        # "id"
    ],
        text_attr=[
            # "class",
            # "id",
            # "score",
        ],
        show_text=True)
    data_collect.offline_process_infos(**infos)
    # Build the color look-up table: one entry per '<source>_<class>' key.
    # (A previous dict mapping track/gt/detect colors was dead code —
    # it was immediately overwritten — and has been removed.)
    lut = LabelLUT()
    lut_labels = {
        "gt_Car": [0., 1., 0.],  # once
        "gt_Truck": [0., 1., 0.],
        "gt_Bus": [0., 1., 0.],
        "gt_Pedestrian": [0., 0., 1.],
        "gt_Cyclist": [1., 0.0, 0.0],
        "gt_car": [0., 1., 0.],  # nuscenes
        "gt_traffic_cone": [1.0, 1.0, 0.25],
        "gt_truck": [0., 1., 0.],
        "gt_pedestrian": [0., 0., 1.0],
        "gt_construction_vehicle": [0., 1., 0.],
        "gt_bus": [0., 1., 0.],
        "gt_trailer": [0., 0.68627451, 0.],
        "gt_motorcycle": [1., 0., 0.],
        "gt_bicycle": [1., 0., 0.],
        "gt_barrier": [0.19607843, 0.47058824, 1.],
    }
    for key, val in lut_labels.items():
        lut.add_label(key, key, val)
    # lut = None
    _3dal_vis = Visualizer(fps=4)
    _3dal_vis.visualize_dataset(data_collect, prefix="frame id", lut=lut)
def load_once(data_path, seq_id):
    """Load points, GT boxes and per-point box labels for one ONCE sequence.

    Args:
        data_path: Root directory holding one folder per sequence.
        seq_id: Sequence name, e.g. "000076".

    Returns:
        dict with 'idx_names', 'pts', 'pts_label' and 'gt' lists, one
        entry per annotated frame.
    """
    seq_dir = os.path.join(data_path, seq_id)
    anno_file = os.path.join(seq_dir, seq_id + '.json')
    frame_ids_list = []
    pts_list = []
    pts_label_list = []
    gt_list = []
    with open(anno_file, 'r') as f:
        seq_annos = json.load(f)
    # We only put three once frames here as an example.
    for frame in seq_annos['frames'][:3]:
        if 'annos' not in frame.keys():
            continue
        sequence_id = frame['sequence_id']
        frame_id = frame['frame_id']
        pose = frame['pose']
        frame_annos = frame['annos']
        names = frame_annos['names']
        boxes_3d = np.array(frame_annos['boxes_3d'])
        frame_ids_list.append(frame_id)
        bin_path = os.path.join(seq_dir, 'lidar_roof', '{}.bin'.format(frame_id))
        # Raw scans are (N, 4) float32; keep only x/y/z.
        points = np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4)[:, :3]
        pts_list.append(points)
        gt_list.append(
            {
                "bbox": boxes_3d,
                "class": names,
            })
        # Per-point box index (-1 outside any box), computed on GPU.
        box_idxs = points_in_boxes_gpu(
            torch.from_numpy(points).unsqueeze(dim=0).float().cuda(),
            torch.from_numpy(boxes_3d).unsqueeze(dim=0).float().cuda()
        ).long().squeeze(dim=0).cpu().numpy()
        pts_label_list.append(box_idxs)
    return {
        "idx_names": frame_ids_list,
        "pts": pts_list,
        "pts_label": pts_label_list,
        "gt": gt_list,
    }
def load_nuscenes(data_path, seq_id):
    """Load points, GT boxes and per-point box labels for one nuScenes scene.

    Args:
        data_path: Root directory containing the info pickle
            'nuscenes_infos_10sweeps_train.pkl' and the lidar files it
            references.
        seq_id: Scene name prefix of the lidar files, e.g.
            "n015-2018-07-18-11-07-57+0800".

    Returns:
        dict with 'idx_names', 'pts', 'pts_label' and 'gt' lists, one
        entry per frame of the requested scene that has both points and
        valid (non-'ignore') boxes.
    """
    info_path = os.path.join(data_path, 'nuscenes_infos_10sweeps_train.pkl')
    # Fix: close the file handle (was pickle.load(open(...))). The pickle
    # is a trusted local artifact generated by the toolchain.
    with open(info_path, "rb") as f:
        annos = pickle.load(f)
    frame_ids_list = list()
    pts_list = list()
    pts_label_list = list()
    gt_list = list()
    for anno in annos:
        lidar_path = anno['lidar_path']
        # Scene name is the part between 'LIDAR_TOP/' and '__LIDAR_TOP__'.
        cur_seq_name = lidar_path.split("__LIDAR_TOP__")[0].split("LIDAR_TOP/")[-1]
        if cur_seq_name != seq_id:
            continue
        gt_names = anno['gt_names']
        gt_boxes = anno['gt_boxes'][:, :7]
        # The filename ends with '<timestamp>.pcd.bin'; remove the suffix
        # explicitly. (The previous `.strip('.pcd.bin')` stripped any of
        # the characters {.pcdbin} from both ends and only worked because
        # timestamps are all digits.)
        frame_id = lidar_path.split('_')[-1]
        if frame_id.endswith('.pcd.bin'):
            frame_id = frame_id[:-len('.pcd.bin')]
        bin_path = os.path.join(data_path, lidar_path)
        # Raw sweeps are (N, 5) float32; keep only x/y/z.
        points = np.fromfile(bin_path, dtype=np.float32).reshape([-1, 5])[:, :3]
        # Drop boxes labeled 'ignore'.
        boxes_3d = []
        names = []
        for box, name in zip(gt_boxes, gt_names):
            if name != 'ignore':
                boxes_3d.append(box)
                names.append(name)
        boxes_3d = np.array(boxes_3d)
        if len(points) and len(boxes_3d):
            # Per-point box index (-1 outside any box), computed on GPU.
            box_idxs = points_in_boxes_gpu(
                torch.from_numpy(points).unsqueeze(dim=0).float().cuda(),
                torch.from_numpy(boxes_3d).unsqueeze(dim=0).float().cuda()
            ).long().squeeze(dim=0).cpu().numpy()
        else:
            # box_idxs = np.zeros(len(points)) - 1
            # Frames with no points or no valid boxes are skipped entirely.
            continue
        gt_list.append(
            {
                "bbox": boxes_3d,
                "class": names,
            })
        pts_list.append(points)
        frame_ids_list.append(frame_id)
        pts_label_list.append(box_idxs)
    info = {
        "idx_names": frame_ids_list,
        "pts": pts_list,
        "pts_label": pts_label_list,
        "gt": gt_list,
    }
    return info
if __name__ == '__main__':
    np.set_printoptions(precision=3, linewidth=500,
                        threshold=np.inf, suppress=True)
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--data_file', type=str, default="once_data", help='the data path')
    parser.add_argument('--seq_id', type=str, default="000076", help='the sequence id')
    # parser.add_argument('--data_file', type=str, default="nuscenes_data", help='the data path of nuscenes')
    # parser.add_argument('--seq_id', type=str, default="n015-2018-07-18-11-07-57+0800", help='the sequence id of nuscenes')
    parser.add_argument('--func', type=str, default='once', help='choose the data')
    args = parser.parse_args()
    # Dispatch on the requested dataset loader.
    if args.func == 'once':
        info = load_once(args.data_file, args.seq_id)
    elif args.func == 'nuscenes':
        info = load_nuscenes(args.data_file, args.seq_id)
    else:
        # Fail fast with a usage message instead of the NameError on `info`
        # that an unsupported --func value previously caused.
        parser.error("--func must be 'once' or 'nuscenes', got %r" % args.func)
    sequence_visualize3d(**info)
| 6,236
| 32.532258
| 124
|
py
|
3DTrans
|
3DTrans-master/tools/show_squence_demo/utils/base_dataset.py
|
import copy
import numpy as np
from collections import defaultdict
from .components import Object3D
class DataCollect:
    """In-memory container that normalizes per-frame visualization data.

    Holds, per frame: a point array, per-point box indices, and one or
    more box "sources" (e.g. 'gt', 'detect') with their attributes, in
    the structures consumed by the Visualizer via get_split()/DataSplit.
    """

    def __init__(self, name='Waymo',
                 color_attr=[],
                 text_attr=[],
                 show_text=False):
        # NOTE(review): mutable default arguments ([]) are shared across
        # calls; harmless only as long as they are never mutated in place.
        # super().__init__(name=name)
        self.name = name
        self.num_classes = 3
        self.datas = list()  # per-frame point arrays
        self.data_labels = list()  # per-frame per-point box indices
        self.labels = list()  # per-frame {source -> box attribute dict}
        self.idx_names = list()  # per-frame display names / frame ids
        self.label_to_names = {}
        self.color_attr = color_attr  # box attrs that drive coloring ("class"/"id")
        self.text_attr = text_attr  # box attrs rendered as text
        self.show_text = show_text

    def offline_process_infos(self, **infos):
        """Ingest a whole sequence at once, replacing any previous data.

        Required keys: 'idx_names' and 'pts' (equal length); 'pts_label'
        holds per-point box indices. Every other key (e.g. 'gt') is a box
        source: a per-frame list of dicts with at least 'bbox' and
        optionally 'id' / 'class' / 'score'.
        """
        self.datas.clear()
        self.labels.clear()
        self.data_labels.clear()
        infos_keys = infos.keys()
        if "idx_names" not in infos_keys or "pts" not in infos_keys:
            raise ValueError("Need idx_names' or pts' infos")
        pts_len = len(infos["pts"])
        idx_len = len(infos["idx_names"])
        assert pts_len == idx_len, f"length of pts != idx_names"
        # source -> set of class/id values seen (used for color LUT keys)
        names = dict()
        for idx in range(pts_len):
            pts = infos["pts"][idx]
            # NOTE(review): astype() returns a copy; this line has no effect
            # on the stored array — confirm intended.
            pts.astype(np.float32)
            self.datas.append(pts)
            self.data_labels.append(infos['pts_label'][idx])
            idx_n = infos["idx_names"][idx]
            self.idx_names.append(idx_n)
            label_info = defaultdict(dict)
            for key in infos_keys:
                # Skip non-source entries ('idx_names', 'pts', 'pts_label').
                if key == "idx_names" or "pts" in key:
                    continue
                if key not in names.keys():
                    names[key] = set()
                bbox = infos[key][idx]["bbox"]
                bbox_len = len(bbox)
                repeat_name = np.repeat([key], bbox_len)
                label_info[key]["name"] = repeat_name
                label_info[key]["bbox"] = bbox
                # Keep an untouched copy of box centers for text placement.
                meta_center = copy.deepcopy(bbox[:, :3])
                label_info[key]["meta_center"] = meta_center
                # @todo: other features
                if "id" in infos[key][idx].keys():
                    label_info[key]["id"] = infos[key][idx]["id"]
                    if "id" in self.color_attr:
                        names[key].update(label_info[key]["id"])
                if "class" in infos[key][idx].keys():
                    label_info[key]["class"] = infos[key][idx]["class"]
                    if "class" in self.color_attr:
                        names[key].update(label_info[key]["class"])
                if "score" in infos[key][idx].keys():
                    label_info[key]["score"] = infos[key][idx]["score"]
            self.labels.append(label_info)
        self.label_to_names = self.get_label_to_names(names)

    def get_label_to_names(self, names):
        """Returns a label to names dictionary object.

        Keys are '<source>' (when no color attribute is configured) or
        '<source>_<class-or-id>' otherwise; values are empty lists.

        Returns:
            A dict where keys are label names and
            values are (empty) placeholder lists.
        """
        if len(self.color_attr) == 0:
            # NOTE(review): dict.fromkeys shares ONE list instance across all
            # keys; safe only while the values stay unmutated — confirm.
            return dict.fromkeys(names.keys(), list())
        new_names = dict()
        for key, val in names.items():
            if len(val) == 0:
                new_names[key] = []
            for sub_name in val:
                # Color keys are '<source>_<class-or-id>'.
                new_name = key+"_"+str(sub_name)
                new_names[new_name] = []
        return new_names

    def is_tested(self, attr):
        """Checks whether a datum has been tested.

        Args:
            attr: The attributes associated with the datum.

        Returns:
            This returns True if the test result has been stored for the datum with the
            specified attribute; else returns False.
        """
        # Interface stub: this collector never stores test results.
        return False

    def save_test_result(self, results, attr):
        """Saves the output of a model.

        Args:
            results: The output of a model for the datum associated with the attribute passed.
            attr: The attributes that correspond to the outputs passed in results.
        """
        # Interface stub: intentionally a no-op.
        return

    # @staticmethod
    # def read_lidar(path):
    #     """Reads lidar data from the path provided.
    #     Returns:
    #         A data object with lidar information.
    #     """
    #     assert Path(path).exists()
    #     return np.fromfile(path, dtype=np.float32).reshape(-1, 6)name_ns

    def read_label(self, labels):
        """Reads labels of bound boxes.

        Converts a per-source attribute dict (as stored in self.labels)
        into a flat list of Object3D instances.

        Returns:
            The data objects with bound boxes information.
        """
        objects = []
        names = labels.keys()
        for name in names:
            attr_keys = labels[name].keys()
            name_ns = labels[name]["name"]
            bboxs = labels[name]["bbox"]
            meta_centers = labels[name]["meta_center"]
            bboxs_len = len(bboxs)
            for i in range(bboxs_len):
                # Box layout: [x, y, z, l?, w?, h?, heading]; size is read
                # as (bbox[4], bbox[5], bbox[3]) — reordering for Object3D.
                center = [float(bboxs[i][0]), float(
                    bboxs[i][1]), float(bboxs[i][2])]
                size = [float(bboxs[i][4]), float(
                    bboxs[i][5]), float(bboxs[i][3])]
                heading = float(bboxs[i][6])
                meta_center = [float(meta_centers[i][0]), float(
                    meta_centers[i][1]), float(meta_centers[i][2])]
                cls = labels[name]["class"][i] if "class" in attr_keys else ""
                score = labels[name]["score"][i] if "score" in attr_keys else 1.
                id = labels[name]["id"][i] if "id" in attr_keys else ""
                # Display name drives LUT color lookup.
                show_name = name_ns[i]
                if "class" in self.color_attr:
                    show_name = name_ns[i] + "_"+ cls
                elif "id" in self.color_attr and id!= "":
                    show_name = name_ns[i] + "_"+ str(id)
                # Assemble the optional on-screen text from configured attrs.
                text = ""
                if "name" in self.text_attr:
                    text = text + " " + name_ns[i]
                if "class" in self.text_attr:
                    text = text + " " + cls
                if "score" in self.text_attr:
                    text = text + " " + f"{score:.2f}"
                if "id" in self.text_attr:
                    text = text + " " + str(id)
                text = text.strip()
                show_text=self.show_text
                if text == "":
                    show_text = False
                objects.append(
                    Object3D(center=center,
                             size=size,
                             yaw=heading,
                             name=show_name,
                             cls=cls,
                             score=score,
                             id=id,
                             text=text,
                             show_meta=show_text,
                             meta_center=meta_center,
                             show_arrow=True))
        return objects

    def get_split_list(self):
        """Returns the list of per-frame data dicts.

        Returns:
            A list with one dict per frame containing 'data' (points),
            'label' (box sources) and 'data_label' (per-point box indices).
        """
        spilt_list = []
        for id in range(len(self.datas)):
            data_dict = {'data': self.datas[id],
                         'label': self.labels[id],
                         'data_label': self.data_labels[id],
                         }
            spilt_list.append(data_dict)
        return spilt_list

    def __len__(self):
        # Number of collected frames.
        return len(self.datas)

    def get_split(self, prefix):
        """Returns a dataset split.

        Args:
            prefix: Display prefix used when naming frames (e.g. "frame id").

        Returns:
            A dataset split object providing the requested subset of the data.
        """
        return DataSplit(self, self.idx_names, prefix)
class DataSplit():
    """Read-only view over a DataCollect instance.

    Exposes frames via the get_data()/get_attr() protocol that the
    Visualizer's DatasetModel expects.
    """

    def __init__(self, dataset, idx_names, prefix=""):
        self.idx_names = idx_names
        self.data_list = dataset.get_split_list()
        self.dataset = dataset
        self.prefix = prefix

    def __len__(self):
        """Number of frames in this split."""
        return len(self.data_list)

    def get_data(self, idx):
        """Return the frame at *idx* as a dict with keys
        'point', 'feat', 'bounding_boxes' and 'pts_label'.
        """
        entry = self.data_list[idx]
        # Boxes are materialized into Object3D instances on demand.
        boxes = self.dataset.read_label(entry['label'])
        return {
            'point': entry['data'],
            'feat': None,
            'bounding_boxes': boxes,
            'pts_label': entry['data_label'],
        }

    def get_attr(self, idx):
        """Return display attributes for frame *idx* ('<prefix>:<frame id>')."""
        return {'name': self.prefix + ":" + self.idx_names[idx]}
| 8,841
| 31.627306
| 96
|
py
|
3DTrans
|
3DTrans-master/tools/show_squence_demo/utils/gui.py
|
import math
import sys
import numpy as np
import threading
import open3d as o3d
from open3d.visualization import gui
from open3d.visualization import rendering
from collections import deque
from .components import *
import time
import os
class Model:
    """The class that helps build visualization models based on attributes,
    data, and methods.

    Base class: subclasses (DataModel, DatasetModel) implement load()/unload().
    """
    # bounding_box_prefix = "Bounding Boxes/"
    bounding_box_prefix = "bbox/"

    class BoundingBoxData:
        """The class to define a bounding box that is used to describe the
        target location.

        Args:
            name: The name of the pointcloud array.
            boxes: The array of pointcloud that define the bounding box.
        """

        def __init__(self, name, boxes):
            self.name = name
            self.boxes = boxes

    def __init__(self):
        # Note: the tpointcloud cannot store the actual data arrays, because
        # the tpointcloud requires specific names for some arrays (e.g.
        # "positions", "colors"). So the tpointcloud exists for rendering and
        # initially only contains the "positions" array.
        self.tclouds = {}  # name -> tpointcloud
        self.tcams = {}  # name -> tcams
        self.data_names = []  # the order data will be displayed / animated
        self.bounding_box_data = []  # [BoundingBoxData]
        self._data = {}  # name -> {attr_name -> numpyarray}
        self._known_attrs = {}  # name -> set(attrs)
        self._attr2minmax = {}  # only access in _get_attr_minmax()
        # Canonical display names for common attribute keys.
        self._attr_rename = {"label": "labels", "feat": "feature"}

    def _init_data(self, name):
        """Register *name* with an empty point cloud / cam dict / attr dict."""
        tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
        self.tclouds[name] = tcloud
        tcam = dict()
        self.tcams[name] = tcam
        self._data[name] = {}
        self.data_names.append(name)

    def is_loaded(self, name):
        """Check if the data is loaded."""
        if name in self._data:
            return len(self._data[name]) > 0
        else:
            # if the name isn't in the data, presumably it is loaded
            # (for instance, if this is a bounding box).
            return True

    def load(self, name, fail_if_no_space=False):
        """If data is not loaded, then load the data."""
        assert (False)  # pure virtual

    def unload(self, name):
        # Pure virtual: subclasses decide how/whether to evict data.
        assert (False)  # pure virtual

    def create_point_cloud(self, data):
        """Create a point cloud based on the data provided.

        The data should include name and points. When 'pts_label' is
        present, per-point colors are assigned from the bounding box each
        point falls in, using self.lut.
        NOTE(review): self.lut is set by DatasetModel, not by this base
        class — confirm callers with 'pts_label' always go through it.
        """
        assert ("name" in data)  # name is a required field
        assert ("points" in data)  # 'points' is a required field
        name = data["name"]
        pts = self._convert_to_numpy(data["points"])
        tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
        known_attrs = set()
        if pts.shape[1] >= 4:
            # We can't use inplace Tensor creation (e.g. from_numpy())
            # because the resulting arrays won't be contiguous. However,
            # TensorList can be inplace.
            xyz = pts[:, [0, 1, 2]]
            tcloud.point["positions"] = Visualizer._make_tcloud_array(xyz,
                                                                      copy=True)
        else:
            tcloud.point["positions"] = Visualizer._make_tcloud_array(pts)
        if 'pts_label' in data.keys():
            # test_dict = {'bg':0, 'Vehicle':1, 'Pedestrian':2, 'Cyclist':'3'}
            # Default gray for points outside every box.
            pts_color = np.ones_like(pts) * 0.3
            bboxes = data['bounding_boxes']
            for k, bbox in enumerate(bboxes):
                box_class = bbox.label_class
                label_color = np.array(self.lut.labels[box_class].color, np.float32)
                # pts_label holds the index of the box each point belongs to.
                point_indices = (data['pts_label'] == k)
                pts_color[point_indices, :] = label_color
            tcloud.point["colors"] = Visualizer._make_tcloud_array(pts_color)
        self.tclouds[name] = tcloud
        # Add scalar attributes and vector3 attributes
        attrs = {}
        for k, v in data.items():
            attr = self._convert_to_numpy(v)
            if attr is None or isinstance(v, dict):
                continue
            attr_name = k
            if attr_name == "point":
                continue
            new_name = self._attr_rename.get(attr_name)
            if new_name is not None:
                attr_name = new_name
            # Only 1-D (scalar) or 2-D (vector) arrays are kept as attrs.
            if len(attr.shape) == 1 or len(attr.shape) == 2:
                attrs[attr_name] = attr
                known_attrs.add(attr_name)
        self._data[name] = attrs
        self._known_attrs[name] = known_attrs

    def create_cams(self, name, cam_dict, key='img', update=False):
        """Create images based on the data provided.

        The data should include name and cams. When *update* is True the
        raw cam dict is also stored under the 'cams' attribute.
        """
        tcam = dict()
        for k, v in cam_dict.items():
            img = self._convert_to_numpy(v[key])
            tcam[k] = o3d.t.geometry.Image(Visualizer._make_tcloud_array(img))
        self.tcams[name] = tcam
        if update:
            self._data[name]['cams'] = cam_dict

    def _convert_to_numpy(self, ary):
        """Best-effort conversion of list / numpy / tf / torch data to a
        float32-friendly numpy array; returns None when not convertible."""
        if isinstance(ary, list):
            try:
                return np.array(ary, dtype='float32')
            except TypeError:
                return None
        elif isinstance(ary, np.ndarray):
            if len(ary.shape) == 2 and ary.shape[0] == 1:
                ary = ary[0]  # "1D" array as 2D: [[1, 2, 3,...]]
            if ary.dtype.name.startswith('int'):
                return np.array(ary, dtype='float32')
            else:
                return ary
        # Optional framework tensors: converted via .numpy() when available.
        try:
            import tensorflow as tf
            if isinstance(ary, tf.Tensor):
                return self._convert_to_numpy(ary.numpy())
        except:
            pass
        try:
            import torch
            if isinstance(ary, torch.Tensor):
                return self._convert_to_numpy(ary.detach().cpu().numpy())
        except:
            pass
        return None

    def get_attr(self, name, attr_name):
        """Get an attribute from data based on the name passed."""
        if name in self._data:
            attrs = self._data[name]
            if attr_name in attrs:
                return attrs[attr_name]
        return None

    def get_attr_shape(self, name, attr_name):
        """Get a shape from data based on the name passed."""
        attr = self.get_attr(name, attr_name)
        if attr is not None:
            return attr.shape
        return []

    def get_attr_minmax(self, attr_name, channel):
        """Get the minimum and maximum for an attribute (over all clouds),
        caching per-cloud results in self._attr2minmax."""
        attr_key_base = attr_name + ":" + str(channel)
        attr_min = 1e30
        attr_max = -1e30
        for name in self._data.keys():
            key = name + ":" + attr_key_base
            if key not in self._attr2minmax:
                attr = self.get_attr(name, attr_name)
                if attr is None:  # clouds may not have all the same attributes
                    continue
                if len(attr.shape) > 1:
                    attr = attr[:, channel]
                self._attr2minmax[key] = (attr.min(), attr.max())
            amin, amax = self._attr2minmax[key]
            attr_min = min(attr_min, amin)
            attr_max = max(attr_max, amax)
        if attr_min > attr_max:
            # No cloud had this attribute.
            return (0.0, 0.0)
        return (attr_min, attr_max)

    def get_available_attrs(self, names):
        """Get a list of attributes based on the name.

        Returns the sorted intersection of attrs known for all *names*.
        """
        attr_names = None
        for n in names:
            known = self._known_attrs.get(n)
            if known is not None:
                if attr_names is None:
                    attr_names = known
                else:
                    attr_names = attr_names.intersection(known)
        if attr_names is None:
            return []
        return sorted(attr_names)

    def calc_bounds_for(self, name):
        """Calculate the bounds for a pointcloud."""
        if name in self.tclouds and not self.tclouds[name].is_empty():
            tcloud = self.tclouds[name]
            # Ideally would simply return tcloud.compute_aabb() here, but it can
            # be very slow on macOS with clang 11.0
            pts = tcloud.point["positions"].numpy()
            min_val = (pts[:, 0].min(), pts[:, 1].min(), pts[:, 2].min())
            max_val = (pts[:, 0].max(), pts[:, 1].max(), pts[:, 2].max())
            return [min_val, max_val]
        else:
            return [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
class DataModel(Model):
    """Model backed by in-memory user data.

    Args:
        userdata: The dataset to be used in the visualization; an iterable
            of dicts, each with a "name" key and optional 'bounding_boxes'.
    """

    def __init__(self, userdata):
        super().__init__()
        # Creating the TPointCloud here would block the UI; deferring the
        # work to load() lets the loading dialog display instead.
        self._name2srcdata = {}
        self.bounding_box_data = []
        for item in userdata:
            unique_name = item["name"]
            # De-duplicate names by appending underscores until unused.
            while unique_name in self._data:
                unique_name += "_"
            self._init_data(unique_name)
            self._name2srcdata[unique_name] = item
            if 'bounding_boxes' in item:
                self.bounding_box_data.append(
                    Model.BoundingBoxData(unique_name, item['bounding_boxes']))

    def load(self, name, fail_if_no_space=False):
        """Materialize the point cloud for *name* on first use."""
        if self.is_loaded(name):
            return True
        self.create_point_cloud(self._name2srcdata[name])

    def unload(self, name):
        """In-memory user data is never evicted; intentionally a no-op."""
        pass
class DatasetModel(Model):
    """The class used to manage a dataset model.

    Loads point clouds lazily from a dataset split and keeps them in an
    LRU cache bounded by a soft memory limit.

    Args:
        dataset: The 3D ML dataset to use. You can use the base dataset, sample datasets , or a custom dataset.
        split: A string identifying the dataset split that is usually one of 'training', 'test', 'validation', or 'all'.
        indices: The indices to be used for the datamodel. This may vary based on the split used.
    """

    def __init__(self, dataset, indices, prefix, lut=None):
        super().__init__()
        self._dataset = None
        self._name2datasetidx = {}
        self._memory_limit = 10240 * 1024 * 1024  # memory limit in bytes
        self._current_memory_usage = 0
        self._cached_data = deque()  # LRU order: oldest at the left
        self.lut = lut  # color LUT used by create_point_cloud()
        self._dataset = dataset.get_split(prefix)
        if len(self._dataset) > 0:
            if indices is None:
                indices = range(0, len(self._dataset))
            # Some results from get_split() (like "training") are randomized.
            # Sort, so that the same index always returns the same piece of data.
            # path2idx = {}
            # for i in range(0, len(self._dataset.path_list)):
            #     path2idx[self._dataset.path_list[i]] = i
            # real_indices = [path2idx[p] for p in sorted(path2idx.keys())]
            # indices = [real_indices[idx] for idx in indices]
            # SemanticKITTI names its items <sequence#>_<timeslice#>,
            # "mm_nnnnnn". We'd like to use the hierarchical feature of the tree
            # to separate the sequences. We cannot change the name in the dataset
            # because this format is used to report algorithm results, so do it
            # here.
            underscore_to_slash = False
            if dataset.__class__.__name__ == "SemanticKITTI":
                underscore_to_slash = True
            for i in indices:
                info = self._dataset.get_attr(i)
                name = info["name"]
                if underscore_to_slash:
                    name = name.replace("_", "/")
                while name in self._data:  # ensure each name is unique
                    name = name + "_"
                self._init_data(name)
                self._name2datasetidx[name] = i
            # These datasets store RGB in 'feat'; show it as "colors".
            if dataset.__class__.__name__ in [
                    "Toronto3D", "Semantic3D", "S3DIS"
            ]:
                self._attr_rename["feat"] = "colors"
                self._attr_rename["feature"] = "colors"
        else:
            print(
                "[ERROR] Dataset split has no data. Please check that you are pointing to the correct directory for the dataset."
            )
            sys.exit(-1)

    def is_loaded(self, name):
        """Check if the data is loaded."""
        loaded = super().is_loaded(name)
        if loaded and name in self._cached_data:
            # make this point cloud the most recently used
            self._cached_data.remove(name)
            self._cached_data.append(name)
        return loaded

    def load(self, name, fail_if_no_space=False):
        """Check if data is not loaded, and then load the data.

        Returns True on success; False only when *fail_if_no_space* is set
        and loading would exceed the memory limit.
        """
        assert (name in self._name2datasetidx)
        if self.is_loaded(name):
            return True
        idx = self._name2datasetidx[name]
        data = self._dataset.get_data(idx)
        data["name"] = name
        data["points"] = data["point"]
        self.create_point_cloud(data)
        if 'bounding_boxes' in data:
            self.bounding_box_data.append(
                Model.BoundingBoxData(name, data['bounding_boxes']))
        if 'cams' in data:
            # Project the 3D boxes into each camera image for display.
            for _, val in data['cams'].items():
                lidar2img_rt = val['lidar2img_rt']
                bbox_data = data['bounding_boxes']
                bbox_3d_img = BoundingBox3D.project_to_img(
                    bbox_data, np.copy(val['img']), lidar2img_rt)
                val['bbox_3d'] = bbox_3d_img
            self.create_cams(data['name'], data['cams'], update=True)
        size = self._calc_pointcloud_size(self._data[name], self.tclouds[name],
                                          self.tcams[name])
        if size + self._current_memory_usage > self._memory_limit:
            if fail_if_no_space:
                self.unload(name)
                return False
            else:
                # Remove oldest from cache
                remove_name = self._cached_data.popleft()
                remove_size = self._calc_pointcloud_size(
                    self._data[remove_name], self.tclouds[remove_name])
                self._current_memory_usage -= remove_size
                self.unload(remove_name)
                # Add new point cloud to cache
                self._cached_data.append(name)
                self._current_memory_usage += size
                return True
        else:
            self._current_memory_usage += size
            self._cached_data.append(name)
            return True

    def _calc_pointcloud_size(self, raw_data, pcloud, cams={}):
        """Calculate the (approximate) size of the pointcloud based on the rawdata.

        NOTE(review): eviction in load() calls this without *cams*, so cam
        memory is never counted — matches the TODOs below; confirm intended.
        """
        pcloud_size = 0
        for (attr, arr) in raw_data.items():
            if not isinstance(arr, dict):
                pcloud_size += arr.size * 4
        # Point cloud consumes 64 bytes of per point of GPU memory
        pcloud_size += pcloud.point["positions"].num_elements() * 64
        # TODO: add memory for point cloud color and semantics
        # TODO: add memory for cam images
        return pcloud_size

    def unload(self, name):
        """Unload the data (if it was loaded earlier)."""
        # Only unload if this was loadable; we might have an in-memory,
        # user-specified data created directly through create_point_cloud().
        if name in self._name2datasetidx:
            tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
            self.tclouds[name] = tcloud
            self._data[name] = {}
            self.tcams[name] = {}
            # Drop the associated bounding-box entry, if any.
            bbox_name = Model.bounding_box_prefix + name
            for i in range(0, len(self.bounding_box_data)):
                if self.bounding_box_data[i].name == bbox_name:
                    self.bounding_box_data.pop(i)
                    break
class Visualizer:
"""The visualizer class for dataset objects and custom point clouds."""
    class LabelLUTEdit:
        """This class includes functionality for managing a labellut (label
        look-up-table).

        Wraps a gui.TreeView of per-label checkbox + color cells and keeps
        a label -> RGBA mapping in sync with the UI.
        """

        def __init__(self):
            self.widget = gui.TreeView()
            self._on_changed = None  # takes no args, returns no value
            self.clear()

        def clear(self):
            """Clears the look-up table."""
            self.widget.clear()
            self._label2color = {}  # label -> [r, g, b, a]

        def is_empty(self):
            """Checks if the look-up table is empty."""
            return len(self._label2color) == 0

        def get_colors(self):
            """Returns the list of RGBA colors, one per label."""
            return [
                self._label2color[label] for label in self._label2color.keys()
            ]

        def set_on_changed(self, callback):  # takes no args, no return value
            self._on_changed = callback

        def set_labels(self, labellut):
            """Updates the labels based on look-up table passed.

            Rebuilds the tree with one checkbox+color cell per label.
            """
            self.widget.clear()
            root = self.widget.get_root_item()
            for key in labellut.labels.keys():
                lbl = labellut.labels[key]
                color = lbl.color
                # Store RGBA; the alpha channel encodes checked/unchecked.
                if len(color) == 3:
                    color += [1.0]
                self._label2color[key] = color
                color = gui.Color(lbl.color[0], lbl.color[1], lbl.color[2])
                cell = gui.LUTTreeCell(
                    str(key) + ": " + lbl.name, True, color, None, None)
                cell.checkbox.set_on_checked(
                    self._make_on_checked(key, self._on_label_checked))
                cell.color_edit.set_on_value_changed(
                    self._make_on_color_changed(key,
                                                self._on_label_color_changed))
                self.widget.add_item(root, cell)

        def _make_on_color_changed(self, label, member_func):
            # Bind *label* into the callback (avoids late-binding closures).
            def on_changed(color):
                member_func(label, color)

            return on_changed

        def _on_label_color_changed(self, label, gui_color):
            # Update RGB from the picker; keep the existing alpha.
            self._label2color[label] = [
                gui_color.red, gui_color.green, gui_color.blue,
                self._label2color[label][3]
            ]
            if self._on_changed is not None:
                self._on_changed()

        def _make_on_checked(self, label, member_func):
            # Bind *label* into the callback (avoids late-binding closures).
            def on_checked(checked):
                member_func(label, checked)

            return on_checked

        def _on_label_checked(self, label, checked):
            # Checked state is expressed via the alpha channel (1.0 / 0.0).
            if checked:
                alpha = 1.0
            else:
                alpha = 0.0
            color = self._label2color[label]
            self._label2color[label] = [color[0], color[1], color[2], alpha]
            if self._on_changed is not None:
                self._on_changed()
class ColormapEdit:
"""This class is used to create a color map for visualization of
points.
"""
def __init__(self, window, em):
self.colormap = None
self.widget = gui.Vert()
self._window = window
self._min_value = 0.0
self._max_value = 1.0
self._on_changed = None # takes no args, no return value
self._itemid2idx = {}
self._min_label = gui.Label("")
self._max_label = gui.Label("")
grid = gui.VGrid(2)
grid.add_child(gui.Label("Range (min):"))
grid.add_child(self._min_label)
grid.add_child(gui.Label("Range (max):"))
grid.add_child(self._max_label)
self.widget.add_child(grid)
self.widget.add_fixed(0.5 * em)
self.widget.add_child(gui.Label("Colormap"))
self._edit = gui.TreeView()
self._edit.set_on_selection_changed(self._on_selection_changed)
self.widget.add_child(self._edit)
self._delete = gui.Button("Delete")
self._delete.horizontal_padding_em = 0.5
self._delete.vertical_padding_em = 0
self._delete.set_on_clicked(self._on_delete)
self._add = gui.Button("Add")
self._add.horizontal_padding_em = 0.5
self._add.vertical_padding_em = 0
self._add.set_on_clicked(self._on_add)
h = gui.Horiz()
h.add_stretch()
h.add_child(self._delete)
h.add_fixed(0.25 * em)
h.add_child(self._add)
h.add_stretch()
self.widget.add_fixed(0.5 * em)
self.widget.add_child(h)
self.widget.add_fixed(0.5 * em)
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
def update(self, colormap, min_val, max_val):
"""Updates the colormap based on the minimum and maximum values
passed.
"""
self.colormap = colormap
self._min_value = min_val
self._max_value = max_val
self._min_label.text = str(min_val)
self._max_label.text = str(max_val)
if self._min_value >= self._max_value:
self._max_value = self._min_value + 1.0
self._edit.clear()
self._itemid2idx = {}
root_id = self._edit.get_root_item()
for i in range(0, len(self.colormap.points)):
p = self.colormap.points[i]
color = gui.Color(p.color[0], p.color[1], p.color[2])
val = min_val + p.value * (max_val - min_val)
cell = gui.ColormapTreeCell(val, color, None, None)
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(i, self._on_color_changed))
cell.number_edit.set_on_value_changed(
self._make_on_value_changed(i, self._on_value_changed))
item_id = self._edit.add_item(root_id, cell)
self._itemid2idx[item_id] = i
self._update_buttons_enabled()
def _make_on_color_changed(self, idx, member_func):
def on_changed(color):
member_func(idx, color)
return on_changed
def _on_color_changed(self, idx, gui_color):
self.colormap.points[idx].color = [
gui_color.red, gui_color.green, gui_color.blue
]
if self._on_changed is not None:
self._on_changed()
def _make_on_value_changed(self, idx, member_func):
def on_changed(value):
member_func(idx, value)
return on_changed
def _on_value_changed(self, idx, value):
value = (value - self._min_value) / (self._max_value -
self._min_value)
needs_update = False
value = min(1.0, max(0.0, value))
if ((idx > 0 and value < self.colormap.points[idx - 1].value) or
(idx < len(self.colormap.points) - 1 and
value > self.colormap.points[idx + 1].value)):
self.colormap.points[idx].value = value
o = self.colormap.points[idx]
self.colormap.points.sort(key=lambda cmap_pt: cmap_pt.value)
for i in range(0, len(self.colormap.points)):
if self.colormap.points[i] is o:
idx = i
break
needs_update = True
if idx > 0 and value == self.colormap.points[idx - 1].value:
if idx < len(self.colormap.points):
upper = self.colormap.points[idx + 1].value
else:
upper = 1.0
value = value + 0.5 * (upper - value)
needs_update = True
if idx < len(self.colormap.points
) - 1 and value == self.colormap.points[idx + 1].value:
if idx > 0:
lower = self.colormap.points[idx - 1].value
else:
lower = 0.0
value = lower + 0.5 * (value - lower)
needs_update = True
self.colormap.points[idx].value = value
if needs_update:
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_selection_changed(self, item_id):
self._update_buttons_enabled()
def _on_delete(self):
if len(self.colormap.points) > 2:
idx = self._itemid2idx[self._edit.selected_item]
self.colormap.points = self.colormap.points[:
idx] + self.colormap.points[
idx + 1:]
del self._itemid2idx[self._edit.selected_item]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_add(self):
    """Insert a new point between the selected point and its successor;
    value and color are the midpoint of the two neighbors."""
    if self._edit.selected_item not in self._itemid2idx:  # maybe no selection
        return
    idx = self._itemid2idx[self._edit.selected_item]
    if idx < len(self.colormap.points) - 1:
        lower = self.colormap.points[idx]
        upper = self.colormap.points[idx + 1]
    else:
        # Last point selected: interpolate between the last two.
        lower = self.colormap.points[-2]
        upper = self.colormap.points[-1]
    add_idx = min(idx + 1, len(self.colormap.points) - 1)
    mid_value = lower.value + 0.5 * (upper.value - lower.value)
    mid_color = [
        0.5 * (lo + hi)
        for lo, hi in zip(lower.color[:3], upper.color[:3])
    ]
    new_point = Colormap.Point(mid_value, mid_color)
    self.colormap.points = (self.colormap.points[:add_idx] + [new_point] +
                            self.colormap.points[add_idx:])
    self._update_later()
    if self._on_changed is not None:
        self._on_changed()
def _update_buttons_enabled(self):
if self._edit.selected_item in self._itemid2idx:
self._delete.enabled = len(self.colormap.points) > 2
self._add.enabled = True
else:
self._delete.enabled = False
self._add.enabled = False
def _update_later(self):
    """Schedule a rebuild of the colormap UI on the main thread."""

    def _do_update():
        self.update(self.colormap, self._min_value, self._max_value)
        self._window.post_redraw()  # need to manually request redraw

    gui.Application.instance.post_to_main_thread(self._window, _do_update)
class ProgressDialog:
    """Modal dialog with a progress bar, shown during long loads.

    Args:
        title: The title of the dialog box.
        window: The window where the progress dialog box should be displayed.
        n_items: The maximum number of items.
    """

    def __init__(self, title, window, n_items):
        self._window = window
        self._n_items = n_items
        em = window.theme.font_size

        self.dialog = gui.Dialog(title)
        self._label = gui.Label(title + " ")
        self._progress = gui.ProgressBar()
        self._progress.value = 0.0
        self._layout = gui.Vert(0, gui.Margins(em, em, em, em))
        self._layout.add_child(self._label)
        self._layout.add_fixed(0.5 * em)
        self._layout.add_child(self._progress)
        self.dialog.add_child(self._layout)

    def set_text(self, text):
        """Set the label text on the dialog box."""
        self._label.text = text + " "

    def post_update(self, text=None):
        """Post updates to the main thread."""
        if text is None:
            gui.Application.instance.post_to_main_thread(
                self._window, self.update)
            return

        def _update_with_text():
            self.update()
            self._label.text = text

        gui.Application.instance.post_to_main_thread(self._window,
                                                     _update_with_text)

    def update(self):
        """Advance the progress bar by one item (capped at 100%)."""
        self._progress.value = min(
            1.0, self._progress.value + 1.0 / self._n_items)
# Display names for the entries of the shader combobox.
SOLID_NAME = "Solid Color"
LABELS_NAME = "Label Colormap"
RAINBOW_NAME = "Colormap (Rainbow)"
GREYSCALE_NAME = "Colormap (Greyscale)"
COLOR_NAME = "RGB"
# Data-source names for scalars derived from point positions.
X_ATTR_NAME = "x position"
Y_ATTR_NAME = "y position"
Z_ATTR_NAME = "z position"
def __init__(self, fps=4):
    """Create an empty visualizer.

    Args:
        fps: Playback rate, in frames per second, for animation mode.
    """
    self._objects = None

    self._name2treenode = {}
    self._name2treeid = {}
    self._treeid2name = {}
    self._attrname2lut = {}
    self._colormaps = {}
    self._shadername2panelidx = {}
    self._gradient = rendering.Gradient()
    self._scalar_min = 0.0
    self._scalar_max = 1.0
    self._animation_frames = []
    self._last_animation_time = time.time()
    # The epsilon keeps the delay finite when fps == 0.
    self._animation_delay_secs = 1./(fps + 1e-9)
    self._consolidate_bounding_boxes = False
    self._dont_update_geometry = False
    # 0 = raw camera images, 1 = images with projected 3D boxes
    # (see _on_img_mode_changed).
    self._prev_img_mode = 0
def _init_dataset(self, dataset, indices, prefix, lut=None):
    # Wrap the dataset in a DatasetModel and detect which modalities it
    # provides (lidar is assumed; cameras only when infos carry 'cams').
    self._objects = DatasetModel(dataset, indices, prefix, lut)
    self._modality = dict()
    self._modality['use_lidar'] = True
    self._modality['use_camera'] = False
    if hasattr(self._objects._dataset, 'infos'):
        if 'lidar_path' in self._objects._dataset.infos[0]:
            self._modality['use_lidar'] = True
        if 'cams' in self._objects._dataset.infos[0]:
            self._modality['use_camera'] = True
            # Camera names are taken from the first info entry; assumes
            # all entries share the same camera set.
            self._cam_names = list(
                self._objects._dataset.infos[0]['cams'].keys())
def _init_data(self, data):
    # Wrap raw data in a DataModel and detect modalities from the
    # per-item source dicts.
    self._objects = DataModel(data)
    self._modality = dict()
    for _, val in self._objects._name2srcdata.items():
        if isinstance(val, dict):
            if 'points' in val or 'point' in val:
                self._modality['use_lidar'] = True
            if 'cams' in val:
                self._modality['use_camera'] = True
                # NOTE(review): this reads self._objects._dataset.infos,
                # but _objects is a DataModel in this path — looks copied
                # from _init_dataset; confirm the attribute exists here.
                self._cam_names = list(
                    self._objects._dataset.infos[0]['cams'].keys())
def _init_user_interface(self, title, width, height):
    """Build the whole UI: the 3D scene widget plus the right-hand
    control panel (mouse controls, dataset tabs, properties/shaders)."""
    ### ADD!
    # 3D text labels attached to bounding boxes; cleared and rebuilt by
    # _update_bounding_boxes.
    self._obj_3d_labels = []
    self.window = gui.Application.instance.create_window(
        title, width, height)
    self.window.set_on_layout(self._on_layout)
    em = self.window.theme.font_size
    self._3d = gui.SceneWidget()
    self._3d.enable_scene_caching(True)  # makes UI _much_ more responsive
    self._3d.scene = rendering.Open3DScene(self.window.renderer)
    self.window.add_child(self._3d)
    self._panel = gui.Vert()
    self.window.add_child(self._panel)
    indented_margins = gui.Margins(em, 0, em, 0)
    # View controls
    ctrl = gui.CollapsableVert("Mouse Controls", 0, indented_margins)
    arcball = gui.Button("Arcball")
    arcball.set_on_clicked(self._on_arcball_mode)
    arcball.horizontal_padding_em = 0.5
    arcball.vertical_padding_em = 0
    fly = gui.Button("Fly")
    fly.set_on_clicked(self._on_fly_mode)
    fly.horizontal_padding_em = 0.5
    fly.vertical_padding_em = 0
    reset = gui.Button("Re-center")
    reset.set_on_clicked(self._on_reset_camera)
    reset.horizontal_padding_em = 0.5
    reset.vertical_padding_em = 0
    h = gui.Horiz(0.25 * em)
    h.add_stretch()
    h.add_child(arcball)
    h.add_child(fly)
    h.add_fixed(em)
    h.add_child(reset)
    h.add_stretch()
    ctrl.add_child(h)
    ctrl.add_fixed(em)
    self._panel.add_child(ctrl)
    # Dataset
    model = gui.CollapsableVert("Dataset", 0, indented_margins)
    vgrid = gui.VGrid(2, 0.25 * em)
    model.add_child(vgrid)
    model.add_fixed(0.5 * em)
    bgcolor = gui.ColorEdit()
    #background color
    bgcolor.color_value = gui.Color(1, 1, 1)
    self._on_bgcolor_changed(bgcolor.color_value)
    bgcolor.set_on_value_changed(self._on_bgcolor_changed)
    vgrid.add_child(gui.Label("BG Color"))
    vgrid.add_child(bgcolor)
    # Lower/upper index selector for showing a contiguous slice of the
    # dataset (see _on_lower_val / _on_upper_val).
    list_selector = gui.CollapsableVert("Selector", 0, indented_margins)
    list_selector_grid = gui.VGrid(4, 0.25 * em)
    list_selector_grid.add_child(gui.Label("lower"))
    list_selector.add_child(list_selector_grid)
    self._lower_val = gui.NumberEdit(gui.NumberEdit.INT)
    self._lower_val.int_value = 0
    self._prev_lower_val = 0
    self._lower_val.set_limits(0, len(self._objects.data_names) - 1)
    self._lower_val.set_on_value_changed(self._on_lower_val)
    list_selector_grid.add_child(self._lower_val)
    list_selector_grid.add_child(gui.Label("upper"))
    self._upper_val = gui.NumberEdit(gui.NumberEdit.INT)
    self._upper_val.int_value = len(self._objects.data_names) - 1
    self._prev_upper_val = 0
    self._upper_val.set_limits(0, len(self._objects.data_names) - 1)
    self._upper_val.set_on_value_changed(self._on_upper_val)
    list_selector_grid.add_child(self._upper_val)
    view_tab = gui.TabControl()
    view_tab.set_on_selected_tab_changed(self._on_display_tab_changed)
    model.add_child(view_tab)
    # ... model list
    self._dataset = gui.TreeView()
    self._dataset.set_on_selection_changed(
        self._on_dataset_selection_changed)
    list_grid = gui.Vert(2)
    list_grid.add_child(list_selector)
    list_grid.add_child(self._dataset)
    # ... animation slider
    v = gui.Vert()
    view_tab.add_tab("Animation", v)
    v.add_fixed(0.25 * em)
    grid = gui.VGrid(2)
    v.add_child(grid)
    # ... select image mode
    self._img_mode = gui.Combobox()
    for item in ["raw", "bbox_3d"]:
        self._img_mode.add_item(item)
    self._img_mode.selected_index = 0
    self._img_mode.set_on_selection_changed(self._on_img_mode_changed)
    grid.add_child(gui.Label("Image Mode"))
    grid.add_child(self._img_mode)
    self._slider = gui.Slider(gui.Slider.INT)
    self._slider.set_limits(0, len(self._objects.data_names))
    self._slider.set_on_value_changed(self._on_animation_slider_changed)
    grid.add_child(gui.Label("Index"))
    grid.add_child(self._slider)
    self._slider_current = gui.Label("")
    grid.add_child(gui.Label("Showing"))
    grid.add_child(self._slider_current)
    v.add_fixed(em)
    # Playback controls: prev / play-stop / next.
    self._play = gui.Button("Play")
    self._play.horizontal_padding_em = 0.5
    self._play.vertical_padding_em = 0
    self._play.set_on_clicked(self._on_start_animation)
    self._next = gui.Button(">")
    self._next.horizontal_padding_em = 0.5
    self._next.vertical_padding_em = 0
    self._next.set_on_clicked(self._on_next)
    self._prev = gui.Button("<")
    self._prev.horizontal_padding_em = 0.5
    self._prev.vertical_padding_em = 0
    self._prev.set_on_clicked(self._on_prev)
    h = gui.Horiz()
    h.add_stretch()
    h.add_child(self._prev)
    h.add_child(self._play)
    h.add_child(self._next)
    h.add_stretch()
    v.add_child(h)
    view_tab.add_tab("List", list_grid)
    if 'use_camera' in self._modality and self._modality['use_camera']:
        # One ImageWidget per camera, laid out in a grid.
        w = gui.CollapsableVert("Cameras", 0, indented_margins)
        cam_grid = gui.VGrid(
            2, 0, indented_margins)  # change no. of cam_grid columns here
        self._img = dict()
        w.add_child(cam_grid)
        v.add_child(w)
        for cam in self._cam_names:
            self._img[cam] = gui.ImageWidget(o3d.t.geometry.Image())
            cam_grid.add_child(self._img[cam])
    # Coloring
    properties = gui.CollapsableVert("Properties", 0, indented_margins)
    grid = gui.VGrid(2, 0.25 * em)
    # ... data source
    self._datasource_combobox = gui.Combobox()
    self._datasource_combobox.set_on_selection_changed(
        self._on_datasource_changed)
    self._colormap_channel = gui.Combobox()
    self._colormap_channel.add_item("0")
    self._colormap_channel.set_on_selection_changed(
        self._on_channel_changed)
    h = gui.Horiz()
    h.add_child(self._datasource_combobox)
    h.add_fixed(em)
    h.add_child(gui.Label("Index"))
    h.add_child(self._colormap_channel)
    grid.add_child(gui.Label("Data"))
    grid.add_child(h)
    # ... shader
    self._shader = gui.Combobox()
    self._shader.add_item(self.SOLID_NAME)
    self._shader.add_item(self.LABELS_NAME)
    self._shader.add_item(self.RAINBOW_NAME)
    self._shader.add_item(self.GREYSCALE_NAME)
    self._shader.add_item(self.COLOR_NAME)
    self._colormaps[self.RAINBOW_NAME] = Colormap.make_rainbow()
    self._colormaps[self.GREYSCALE_NAME] = Colormap.make_greyscale()
    self._shader.selected_index = 0
    self._shader.set_on_selection_changed(self._on_shader_changed)
    grid.add_child(gui.Label("Shader"))
    grid.add_child(self._shader)
    properties.add_child(grid)
    # ... add model widget after property widget
    self._panel.add_child(model)
    # ... shader panels: one stacked sub-panel per shader; the mapping
    # _shadername2panelidx is used by _set_shader to switch panels.
    self._shader_panels = gui.StackedWidget()
    panel_idx = 0
    # ... sub-panel: single color
    self._color_panel = gui.Vert()
    self._shader_panels.add_child(self._color_panel)
    self._shadername2panelidx[self.SOLID_NAME] = panel_idx
    panel_idx += 1
    self._color = gui.ColorEdit()
    self._color.color_value = gui.Color(0.5, 0.5, 0.5)
    self._color.set_on_value_changed(self._on_shader_color_changed)
    h = gui.Horiz()
    h.add_child(gui.Label("Color"))
    h.add_child(self._color)
    self._color_panel.add_child(h)
    # ... sub-panel: labels
    self._labels_panel = gui.Vert()
    self._shader_panels.add_child(self._labels_panel)
    self._shadername2panelidx[self.LABELS_NAME] = panel_idx
    panel_idx += 1
    self._label_edit = self.LabelLUTEdit()
    self._label_edit.set_on_changed(self._on_labels_changed)
    self._labels_panel.add_child(gui.Label("Labels"))
    self._labels_panel.add_child(self._label_edit.widget)
    # ... sub-panel: colormap (rainbow and greyscale share one panel)
    self._colormap_panel = gui.Vert()
    self._shader_panels.add_child(self._colormap_panel)
    self._shadername2panelidx[self.RAINBOW_NAME] = panel_idx
    self._shadername2panelidx[self.GREYSCALE_NAME] = panel_idx
    panel_idx += 1
    self._colormap_edit = self.ColormapEdit(self.window, em)
    self._colormap_edit.set_on_changed(self._on_colormap_changed)
    self._colormap_panel.add_child(self._colormap_edit.widget)
    # ... sub-panel: RGB
    self._rgb_panel = gui.Vert()
    self._shader_panels.add_child(self._rgb_panel)
    self._shadername2panelidx[self.COLOR_NAME] = panel_idx
    panel_idx += 1
    self._rgb_combo = gui.Combobox()
    self._rgb_combo.add_item("255")
    self._rgb_combo.add_item("1.0")
    self._rgb_combo.set_on_selection_changed(self._on_rgb_multiplier)
    h = gui.Horiz(0.5 * em)
    h.add_child(gui.Label("Max value"))
    h.add_child(self._rgb_combo)
    self._rgb_panel.add_child(h)
    properties.add_fixed(em)
    properties.add_child(self._shader_panels)
    #collapse the panel
    properties.set_is_open(True)
    self._panel.add_child(properties)
    # Populate tree, etc.
    for name in self._objects.data_names:
        self._add_tree_name(name)
    self._update_datasource_combobox()
def set_lut(self, attr_name, lut):
    """Set the LUT for a specific attribute.

    Args:
        attr_name: The attribute name as string.
        lut: The LabelLUT object that should be updated.
    """
    self._attrname2lut.update({attr_name: lut})
def setup_camera(self):
    """Frame the camera on the union of the selected geometries' bounds."""
    selected_bounds = [
        self._objects.calc_bounds_for(name)
        for name in self._get_selected_names()
    ]
    lo = [1e30, 1e30, 1e30]
    hi = [-1e30, -1e30, -1e30]
    for b in selected_bounds:
        for axis in range(0, 3):
            lo[axis] = min(lo[axis], b[0][axis])
            hi[axis] = max(hi[axis], b[1][axis])
    bbox = o3d.geometry.AxisAlignedBoundingBox(lo, hi)
    self._3d.setup_camera(60, bbox, bbox.get_center())
def show_geometries_under(self, name, show):
    """Show or hide every geometry whose name starts with `name`,
    keeping the tree checkboxes in sync."""
    for geom_name, node in self._name2treenode.items():
        if geom_name.startswith(name):
            self._3d.scene.show_geometry(geom_name, show)
            node.checkbox.checked = show
    self._3d.force_redraw()
def _add_tree_name(self, name, is_geometry=True):
    """Add `name` (a "/"-separated path) to the dataset tree, creating
    checkable parent nodes for each path prefix as needed.

    Args:
        name: Geometry name; "/" separates tree levels.
        is_geometry: When True, the leaf label is colored red to mark a
            geometry that has not been loaded yet.
    """
    names = name.split("/")
    parent = self._dataset.get_root_item()
    for i in range(0, len(names) - 1):
        n = "/".join(names[:i + 1]) + "/"
        if n in self._name2treeid:
            parent = self._name2treeid[n]
        else:
            # BUGFIX: bind the loop variable as a default argument.
            # A plain closure over `n` is late-binding, so after the
            # loop every parent checkbox would have toggled only the
            # last prefix created instead of its own subtree.
            def on_parent_checked(checked, n=n):
                self.show_geometries_under(n, checked)

            cell = gui.CheckableTextTreeCell(n, True, on_parent_checked)
            parent = self._dataset.add_item(parent, cell)
            self._name2treenode[n] = cell
            self._name2treeid[n] = parent
            self._treeid2name[parent] = n

    def on_checked(checked):
        self._3d.scene.show_geometry(name, checked)
        if self._is_tree_name_geometry(name):
            # available attrs could change
            self._update_datasource_combobox()
            self._update_bounding_boxes()
        self._3d.force_redraw()

    cell = gui.CheckableTextTreeCell(names[-1], True, on_checked)
    if is_geometry:
        # Red label marks a geometry that is not loaded yet; it turns
        # green once loading succeeds (see _load_geometries).
        cell.label.text_color = gui.Color(1.0, 0.0, 0.0, 1.0)
    node = self._dataset.add_item(parent, cell)
    self._name2treenode[name] = cell
    self._treeid2name[node] = name
    self._slider.set_limits(0, len(self._objects.data_names) - 1)
    if len(self._objects.data_names) == 1:
        self._slider_current.text = name
def _load_geometry(self, name, ui_done_callback):
    # Load a single geometry on a worker thread behind a two-step
    # progress dialog; `ui_done_callback` then runs on the main thread.
    progress_dlg = Visualizer.ProgressDialog("Loading...", self.window, 2)
    progress_dlg.set_text("Loading " + name + "...")

    def load_thread():
        # NOTE(review): `result` (success flag) is ignored here, unlike
        # in _load_geometries — confirm failures need no UI feedback.
        result = self._objects.load(name)
        progress_dlg.post_update("Loading " + name + "...")
        gui.Application.instance.post_to_main_thread(
            self.window, ui_done_callback)
        gui.Application.instance.post_to_main_thread(
            self.window, self.window.close_dialog)

    self.window.show_dialog(progress_dlg.dialog)
    threading.Thread(target=load_thread).start()
def _load_geometries(self, names, ui_done_callback):
    # Load several geometries on a worker thread with a progress
    # dialog; stops at the first failure (its tree label stays red).
    # Progress has: len(names) items + ui_done_callback
    progress_dlg = Visualizer.ProgressDialog("Loading...", self.window,
                                             len(names) + 1)
    progress_dlg.set_text("Loading " + names[0] + "...")

    def load_thread():
        for i in range(0, len(names)):
            result = self._objects.load(names[i], True)
            if i + 1 < len(names):
                text = "Loading " + names[i + 1] + "..."
            else:
                text = "Creating GPU objects..."
            progress_dlg.post_update(text)
            if result:
                # Green label marks a successfully loaded geometry.
                self._name2treenode[names[i]].label.text_color = gui.Color(
                    0.0, 1.0, 0.0, 1.0)
            else:
                break
        gui.Application.instance.post_to_main_thread(
            self.window, ui_done_callback)
        gui.Application.instance.post_to_main_thread(
            self.window, self.window.close_dialog)

    self.window.show_dialog(progress_dlg.dialog)
    threading.Thread(target=load_thread).start()
def _update_geometry(self, check_unloaded=False):
    # Push every point cloud into the scene with the current material.
    # Tree label color encodes state: green = loaded, red = not loaded.
    if check_unloaded:
        # Drop scene geometry for entries that were unloaded.
        for name in self._objects.data_names:
            if not self._objects.is_loaded(name):
                self._3d.scene.remove_geometry(name)
    material = self._get_material()
    for n, tcloud in self._objects.tclouds.items():
        self._update_point_cloud(n, tcloud, material)
        if not tcloud.is_empty():
            self._name2treenode[n].label.text_color = gui.Color(
                0.0, 1.0, 0.0, 1.0)
            if self._3d.scene.has_geometry(n):
                self._3d.scene.modify_geometry_material(n, material)
        else:
            self._name2treenode[n].label.text_color = gui.Color(
                1.0, 0.0, 0.0, 1.0)
            self._name2treenode[n].checkbox.checked = False
    self._3d.force_redraw()
def _update_point_cloud(self, name, tcloud, material):
    # Write the active attribute into the cloud's
    # "__visualization_scalar" channel and add/update it in the scene.
    if self._dont_update_geometry:
        return
    if tcloud.is_empty():
        return
    attr_name = self._datasource_combobox.selected_text
    attr = None
    flag = 0
    attr = self._objects.get_attr(name, attr_name)
    # Update scalar values
    if attr is not None:
        if len(attr.shape) == 1:
            scalar = attr
        else:
            # Multichannel attribute: visualize the selected channel.
            channel = max(0, self._colormap_channel.selected_index)
            scalar = attr[:, channel]
    else:
        # Attribute missing on this cloud: fall back to all zeros.
        shape = [len(tcloud.point["positions"].numpy())]
        scalar = np.zeros(shape, dtype='float32')
    tcloud.point["__visualization_scalar"] = Visualizer._make_tcloud_array(
        scalar)
    flag |= rendering.Scene.UPDATE_UV0_FLAG
    # Update RGB values
    if attr is not None and (len(attr.shape) == 2 and attr.shape[1] >= 3):
        max_val = float(self._rgb_combo.selected_text)
        if max_val <= 0:
            max_val = 255.0
        colors = attr[:, [0, 1, 2]] * (1.0 / max_val)
        # NOTE(review): `colors` is computed but the write below is
        # deliberately commented out while UPDATE_COLORS_FLAG is still
        # set — confirm this is intended.
        # tcloud.point["colors"] = Visualizer._make_tcloud_array(colors)
        flag |= rendering.Scene.UPDATE_COLORS_FLAG
    # Update geometry
    if self._3d.scene.scene.has_geometry(name):
        self._3d.scene.scene.update_geometry(name, tcloud, flag)
    else:
        self._3d.scene.add_geometry(name, tcloud, material)
    # Respect the tree checkbox for visibility.
    node = self._name2treenode[name]
    if node is not None:
        self._3d.scene.show_geometry(name, node.checkbox.checked)
def _get_material(self):
    # Build a MaterialRecord matching the currently selected shader.
    self._update_gradient()
    material = rendering.MaterialRecord()
    if self._shader.selected_text == self.SOLID_NAME:
        material.shader = "unlitSolidColor"
        c = self._color.color_value
        material.base_color = [c.red, c.green, c.blue, 1.0]
    elif self._shader.selected_text == self.COLOR_NAME:
        material.shader = "defaultUnlit"
        material.base_color = [1.0, 1.0, 1.0, 1.0]
    else:
        # Label and colormap shaders share the gradient-based shader.
        material.shader = "unlitGradient"
        material.gradient = self._gradient
        material.scalar_min = self._scalar_min
        material.scalar_max = self._scalar_max
    return material
def _update_bounding_boxes(self, animation_frame=None):
    # Pick the LUT used to color boxes: the only LUT when there is
    # exactly one, else the one registered under "labels"/"label".
    if len(self._attrname2lut) == 1:
        # Can't do dict.values()[0], so have to iterate over the 1 element
        for v in self._attrname2lut.values():
            lut = v
    elif "labels" in self._attrname2lut:
        lut = self._attrname2lut["labels"]
    elif "label" in self._attrname2lut:
        lut = self._attrname2lut["label"]
    else:
        lut = None
    mat = rendering.MaterialRecord()
    mat.shader = "unlitLine"
    #3dbox line width
    mat.line_width = 2 * self.window.scaling
    if self._consolidate_bounding_boxes:
        # All boxes live in one scene geometry; rebuild it from the
        # checked geometries (or from the current animation frame).
        name = Model.bounding_box_prefix.split("/")[0]
        boxes = []
        # When consolidated we assume bbox_data.name is the geometry name.
        if animation_frame is None:
            for bbox_data in self._objects.bounding_box_data:
                if bbox_data.name in self._name2treenode and self._name2treenode[
                        bbox_data.name].checkbox.checked:
                    boxes += bbox_data.boxes
        else:
            geom_name = self._animation_frames[animation_frame]
            for bbox_data in self._objects.bounding_box_data:
                if bbox_data.name == geom_name:
                    boxes = bbox_data.boxes
                    break
        self._3d.scene.remove_geometry(name)
        ################## HANDLE OBJ 3D LABEL SHOW #################
        # Remove previously added 3D text labels before re-adding.
        for obj_3d_label in self._obj_3d_labels:
            self._3d.remove_3d_label(obj_3d_label)
        self._obj_3d_labels.clear()
        ################## HANDLE OBJ 3D LABEL SHOW #################
        if len(boxes) > 0:
            lines = BoundingBox3D.create_lines(boxes, lut)
            self._3d.scene.add_geometry(name, lines, mat)
            ################## HANDLE OBJ 3D LABEL SHOW #################
            # Starts with open3d v1.13
            for box in boxes:
                if box.show_meta:
                    # meta_pos = box.center + [0., box.size[2]*0.5, 0.]
                    meta_pos = box.meta_center
                    # meta_pos = box.center
                    # print(box.center, meta_pos, box.size)
                    self._obj_3d_labels.append(self._3d.add_3d_label(meta_pos, box.meta))
                    self._obj_3d_labels[-1].scale = 1
                    # NOTE(review): assumes `lut` is not None whenever a
                    # box has show_meta set — confirm.
                    label = lut.labels[box.label_class]
                    self._obj_3d_labels[-1].color = gui.Color(label.color[0], label.color[1], label.color[2])
            ################## HANDLE OBJ 3D LABEL SHOW #################
        if name not in self._name2treenode:
            self._add_tree_name(name, is_geometry=False)
        self._3d.force_redraw()
    else:
        # Don't run this more than once if we aren't consolidating,
        # because nothing will change.
        if len(self._objects.bounding_box_data) > 0:
            if self._objects.bounding_box_data[
                    0].name in self._name2treenode:
                return
        for bbox_data in self._objects.bounding_box_data:
            lines = BoundingBox3D.create_lines(bbox_data.boxes, lut)
            self._3d.scene.add_geometry(bbox_data.name, lines, mat)
        for bbox_data in self._objects.bounding_box_data:
            self._add_tree_name(bbox_data.name, is_geometry=False)
        self._3d.force_redraw()
def _update_gradient(self):
    # Refresh self._gradient from the label LUT (discrete LUT mode) or
    # from the active colormap (continuous gradient mode).
    if self._shader.selected_text == self.LABELS_NAME:
        colors = self._label_edit.get_colors()
        n = float(len(colors) - 1)
        if n >= 1:
            # Evenly space the label colors over [0, 1].
            self._gradient.points = [
                rendering.Gradient.Point(
                    float(i) / n, [
                        colors[i][0], colors[i][1], colors[i][2],
                        colors[i][3]
                    ]) for i in range(0, len(colors))
            ]
        else:
            # Fewer than two labels: fall back to a single magenta stop.
            self._gradient.points = [
                rendering.Gradient.Point(0.0, [1.0, 0.0, 1.0, 1.0])
            ]
        self._gradient.mode = rendering.Gradient.LUT
    else:
        cmap = self._colormaps.get(self._shader.selected_text)
        if cmap is not None:
            self._gradient.points = [
                rendering.Gradient.Point(
                    p.value, [p.color[0], p.color[1], p.color[2], 1.0])
                for p in cmap.points
            ]
            self._gradient.mode = rendering.Gradient.GRADIENT
def _update_geometry_colors(self):
material = self._get_material()
for name, tcloud in self._objects.tclouds.items():
if not tcloud.is_empty() and self._3d.scene.has_geometry(name):
self._3d.scene.modify_geometry_material(name, material)
self._3d.force_redraw()
def _update_datasource_combobox(self):
current = self._datasource_combobox.selected_text
self._datasource_combobox.clear_items()
available_attrs = self._get_available_attrs()
for attr_name in available_attrs:
self._datasource_combobox.add_item(attr_name)
if current in available_attrs:
self._datasource_combobox.selected_text = current
elif len(available_attrs) > 0:
self._datasource_combobox.selected_text = available_attrs[0]
else:
# If no attributes, two possibilities:
# 1) no geometries are selected: don't change anything
# 2) geometries are selected: color solid
has_checked = False
for n, node in self._name2treenode.items():
if node.checkbox.checked and self._is_tree_name_geometry(n):
has_checked = True
break
if has_checked:
self._set_shader(self.SOLID_NAME)
def _update_shaders_combobox(self):
    # Rebuild the shader list to fit the current attribute: vector
    # attributes add RGB, LUT-backed attributes add the label shader.
    current_attr = self._datasource_combobox.selected_text
    current_shader = self._shader.selected_text
    has_lut = (current_attr in self._attrname2lut)
    is_scalar = True
    selected_names = self._get_selected_names()
    if len(selected_names) > 0 and len(
            self._objects.get_attr_shape(selected_names[0],
                                         current_attr)) > 1:
        is_scalar = False
    self._shader.clear_items()
    if not is_scalar:
        self._shader.add_item(self.COLOR_NAME)
    if has_lut:
        self._shader.add_item(self.LABELS_NAME)
        self._label_edit.set_labels(self._attrname2lut[current_attr])
    self._shader.add_item(self.RAINBOW_NAME)
    self._shader.add_item(self.GREYSCALE_NAME)
    self._shader.add_item(self.SOLID_NAME)
    # Keep the label shader when it still applies; otherwise default
    # scalar data to rainbow. Vector data keeps the current selection.
    if current_shader == self.LABELS_NAME and has_lut:
        self._set_shader(self.LABELS_NAME)
    elif is_scalar:
        self._set_shader(self.RAINBOW_NAME)
def _update_attr_range(self):
attr_name = self._datasource_combobox.selected_text
current_channel = self._colormap_channel.selected_index
self._scalar_min, self._scalar_max = self._objects.get_attr_minmax(
attr_name, current_channel)
if self._shader.selected_text in self._colormaps:
cmap = self._colormaps[self._shader.selected_text]
self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
def _set_shader(self, shader_name, force_update=False):
    """Switch the active shader, its options panel, and recolor."""
    # The channel index only applies to scalar shaders. Always keep the
    # enablement consistent, even when the shader does not change.
    self._colormap_channel.enabled = (shader_name != Visualizer.COLOR_NAME)

    if not force_update and shader_name == self._shader.selected_text:
        return

    self._shader.selected_text = shader_name
    self._shader_panels.selected_index = self._shadername2panelidx[
        shader_name]
    if shader_name in self._colormaps:
        self._colormap_edit.update(self._colormaps[shader_name],
                                   self._scalar_min, self._scalar_max)
    self._update_geometry_colors()
def _on_layout(self, context=None):
    # Manual layout: fixed-width control panel on the right, the 3D
    # view fills the remaining width.
    frame = self.window.content_rect
    em = self.window.theme.font_size
    panel_width = 20 * em  #20 * em
    panel_rect = gui.Rect(frame.get_right() - panel_width, frame.y,
                          panel_width, frame.height - frame.y)
    self._panel.frame = panel_rect
    self._3d.frame = gui.Rect(frame.x, frame.y, panel_rect.x - frame.x,
                              frame.height - frame.y)
    # self._3d.frame = gui.Rect(frame.x, frame.y, frame.width,
    #                           frame.height)
def _on_arcball_mode(self):
    """Switch mouse interaction to arcball camera rotation."""
    self._3d.set_view_controls(gui.SceneWidget.ROTATE_CAMERA)
def _on_fly_mode(self):
    """Switch mouse interaction to fly-through navigation."""
    self._3d.set_view_controls(gui.SceneWidget.FLY)
def _on_reset_camera(self):
self.setup_camera()
def _on_dataset_selection_changed(self, item):
name = self._treeid2name[item]
if not self._is_tree_name_geometry(name):
return
def ui_callback():
self._update_attr_range()
self._update_geometry(check_unloaded=True)
self._update_bounding_boxes()
if not self._objects.is_loaded(name):
self._load_geometry(name, ui_callback)
def _on_display_tab_changed(self, index):
if index == 0:
self._animation_frames = self._get_selected_names()
self._slider.set_limits(0, len(self._animation_frames) - 1)
self._on_animation_slider_changed(self._slider.int_value)
# _on_animation_slider_changed() calls _update_bounding_boxes()
else:
for name, node in self._name2treenode.items():
self._3d.scene.show_geometry(name, node.checkbox.checked)
self._update_bounding_boxes()
def _on_animation_slider_changed(self, new_value):
    # Show exactly one animation frame's geometry (plus its camera
    # images, if any), then stretch the "Showing" label to the slider.
    idx = int(new_value)
    for i in range(0, len(self._animation_frames)):
        self._3d.scene.show_geometry(self._animation_frames[i], (i == idx))
    if 'use_camera' in self._modality and self._modality['use_camera']:
        for cam in self._cam_names:
            self._img[cam].update_image(
                self._objects.tcams[self._animation_frames[idx]][cam])
    self._update_bounding_boxes(animation_frame=idx)
    self._3d.force_redraw()
    self._slider_current.text = self._animation_frames[idx]
    r = self._slider_current.frame
    self._slider_current.frame = gui.Rect(r.x, r.y,
                                          self._slider.frame.get_right(),
                                          r.height)
def _on_start_animation(self):
def on_tick():
return self._on_animate()
self._play.text = "Stop"
self._play.set_on_clicked(self._on_stop_animation)
self._last_animation_time = 0.0
self.window.set_on_tick_event(on_tick)
def _on_animate(self):
now = time.time()
if now >= self._last_animation_time + self._animation_delay_secs:
idx = (self._slider.int_value + 1) % len(self._animation_frames)
self._slider.int_value = idx
self._on_animation_slider_changed(idx)
self._last_animation_time = now
return True
return False
def _on_stop_animation(self):
self.window.set_on_tick_event(None)
self._play.text = "Play"
self._play.set_on_clicked(self._on_start_animation)
def _on_next(self):
self._slider.int_value += 1
self._on_animation_slider_changed(self._slider.int_value)
def _on_prev(self):
self._slider.int_value -= 1
self._on_animation_slider_changed(self._slider.int_value)
def _on_img_mode_changed(self, name, idx):
    # Switch camera image overlays between raw images (idx 0) and
    # images with projected 3D boxes (idx 1); no-op without cameras.
    if idx == self._prev_img_mode:
        return
    if not 'use_camera' in self._modality or not self._modality[
            'use_camera']:
        return
    self._prev_img_mode = idx
    if idx == 0:  # or name == 'raw'
        for n in self._objects.data_names:
            if self._objects.is_loaded(n):
                self._objects.create_cams(n,
                                          self._objects._data[n]['cams'],
                                          update=False)
    elif idx == 1:  # or name == 'bbox_3d'
        for n in self._objects.data_names:
            if self._objects.is_loaded(n):
                self._objects.create_cams(n,
                                          self._objects._data[n]['cams'],
                                          key='bbox_3d',
                                          update=False)
def _on_bgcolor_changed(self, new_color):
bg_color = [
new_color.red, new_color.green, new_color.blue, new_color.alpha
]
self._3d.scene.set_background(bg_color)
self._3d.force_redraw()
def _on_lower_val(self, val):
if val > self._upper_val.int_value:
self._lower_val.int_value = self._upper_val.int_value
if val < int(self._lower_val.minimum_value):
self._lower_val.int_value = int(self._lower_val.minimum_value)
self._uncheck_bw_lims()
self._check_bw_lims()
self._prev_lower_val = int(self._lower_val.int_value)
# self._on_datasource_changed(
# self._datasource_combobox.selected_text,
# self._datasource_combobox.selected_index)
self._update_bounding_boxes()
def _on_upper_val(self, val):
if val < self._lower_val.int_value:
self._upper_val.int_value = self._lower_val.int_value
if val > int(self._upper_val.maximum_value):
self._upper_val.int_value = int(self._upper_val.maximum_value)
self._uncheck_bw_lims()
self._check_bw_lims()
self._prev_upper_val = int(self._upper_val.int_value)
# self._on_datasource_changed(
# self._datasource_combobox.selected_text,
# self._datasource_combobox.selected_index)
self._update_bounding_boxes()
def _uncheck_bw_lims(self):
if self._prev_lower_val < self._lower_val.int_value:
for i in range(self._prev_lower_val, self._lower_val.int_value):
name = self._objects.data_names[i]
self._name2treenode[name].checkbox.checked = False
self._3d.scene.show_geometry(name, False)
if self._prev_upper_val > self._upper_val.int_value:
for i in range(self._upper_val.int_value + 1,
self._prev_upper_val + 1):
name = self._objects.data_names[i]
self._name2treenode[name].checkbox.checked = False
self._3d.scene.show_geometry(name, False)
def _check_bw_lims(self):
for i in range(self._lower_val.int_value,
self._upper_val.int_value + 1):
name = self._objects.data_names[i]
self._name2treenode[name].checkbox.checked = True
item = [j for j, k in self._treeid2name.items() if name == k][0]
self._on_dataset_selection_changed(item)
self._3d.scene.show_geometry(name, True)
self._3d.force_redraw()
def _on_datasource_changed(self, attr_name, idx):
    """Handle selection of a different attribute as the color source.

    Rebuilds the channel combobox for the attribute's channel count, updates
    the value range and shader list, picks a reasonable shader for the new
    attribute, and re-uploads the geometry.
    """
    selected_names = self._get_selected_names()
    # Channel count comes from the first selected object's attribute shape.
    n_channels = 1
    if len(selected_names) > 0:
        shape = self._objects.get_attr_shape(selected_names[0], attr_name)
        if len(shape) <= 1:
            n_channels = 1
        else:
            n_channels = max(1, shape[1])
    # Keep the previously selected channel when it is still valid.
    current_channel = max(0, self._colormap_channel.selected_index)
    current_channel = min(n_channels - 1, current_channel)
    self._colormap_channel.clear_items()
    for i in range(0, n_channels):
        self._colormap_channel.add_item(str(i))
    self._colormap_channel.selected_index = current_channel
    self._update_attr_range()
    self._update_shaders_combobox()
    # Try to intelligently pick a shader.
    current_shader = self._shader.selected_text
    if current_shader == Visualizer.SOLID_NAME:
        # User explicitly chose solid; leave it alone.
        pass
    elif attr_name in self._attrname2lut:
        self._set_shader(Visualizer.LABELS_NAME)
    elif attr_name == "colors":
        self._set_shader(Visualizer.COLOR_NAME)
    elif n_channels >= 3:
        self._set_shader(Visualizer.SOLID_NAME)
    elif current_shader == Visualizer.COLOR_NAME:  # vector -> scalar
        self._set_shader(Visualizer.RAINBOW_NAME)
        # self._set_shader(Visualizer.SOLID_NAME)
    else:  # changing from one scalar to another, don't change
        pass
    self._update_geometry()
def _on_channel_changed(self, name, idx):
    """Handle selection of a different attribute channel."""
    self._update_attr_range()
    self._update_geometry()  # need to recompute scalars array
def _on_shader_changed(self, name, idx):
    """Handle selection of a different shader in the combobox."""
    # _shader.current_text is already name, so we need to force an update
    self._set_shader(name, force_update=True)
def _on_shader_color_changed(self, color):
    """Re-color the geometry after the solid-shader color was edited."""
    self._update_geometry_colors()
def _on_labels_changed(self):
    """Re-color the geometry after the label lookup table was edited."""
    self._update_geometry_colors()
def _on_colormap_changed(self):
    """Persist the edited colormap for the current shader and re-color."""
    self._colormaps[
        self._shader.selected_text] = self._colormap_edit.colormap
    self._update_geometry_colors()
def _on_rgb_multiplier(self, text, idx):
    """Handle a change of the RGB intensity multiplier; re-upload geometry."""
    self._update_geometry()
def _get_selected_names(self):
# Note that things like bounding boxes could be in the tree, and we
# do not want to include them in the list of things selected, even if
# they are checked.
selected_names = []
for n in self._objects.data_names:
if self._name2treenode[n].checkbox.checked:
selected_names.append(n)
return selected_names
def _get_available_attrs(self):
    """Return the attributes shared by all currently selected geometries."""
    selected_names = self._get_selected_names()
    return self._objects.get_available_attrs(selected_names)
def _is_tree_name_geometry(self, name):
return (name in self._objects.data_names)
@staticmethod
def _make_tcloud_array(np_array, copy=False):
    """Wrap a numpy array as an open3d.core.Tensor.

    Copies when explicitly requested or when the array is not C-contiguous;
    otherwise shares the numpy buffer zero-copy via Tensor.from_numpy.
    """
    if copy or not np_array.data.c_contiguous:
        return o3d.core.Tensor(np_array)
    else:
        return o3d.core.Tensor.from_numpy(np_array)
def visualize_dataset(self,
                      dataset,
                      prefix="",
                      lut=None,
                      indices=None,
                      width=1280 + 320,
                      height=768):
    """Visualize a dataset.

    Example:
        Minimal example for visualizing a dataset::

            import open3d.ml.torch as ml3d  # or open3d.ml.tf as ml3d

            dataset = ml3d.datasets.SemanticKITTI(dataset_path='/path/to/SemanticKITTI/')
            vis = ml3d.vis.Visualizer()
            vis.visualize_dataset(dataset, 'all', indices=range(100))

    Args:
        dataset: The dataset to use for visualization.
        prefix: Name prefix for the loaded data items.
        lut: Optional label lookup table; when omitted one is built from the
            dataset's label_to_names mapping.
        indices: An iterable with a subset of the data points to visualize, such as [0,2,3,4].
        width: The width of the visualization window.
        height: The height of the visualization window.
    """
    # Setup the labels
    if lut is None:
        lut = LabelLUT()
        for key, val in dataset.label_to_names.items():
            # NOTE(review): entries are added only when the class name is
            # empty, and the key doubles as the display name — confirm this
            # matches the datasets used by this fork.
            if len(val) == 0:
                lut.add_label(key, key)
    self.set_lut("labels", lut)
    self._consolidate_bounding_boxes = True
    self._init_dataset(dataset, indices, prefix, lut)
    self._visualize("3DTrans", width, height)
def visualize(self,
              data,
              lut=None,
              bounding_boxes=None,
              width=1280,
              height=768):
    """Visualize a custom point cloud data.

    Example:
        Minimal example for visualizing a single point cloud with an
        attribute::

            import numpy as np
            import open3d.ml.torch as ml3d
            # or import open3d.ml.tf as ml3d

            data = [ {
                'name': 'my_point_cloud',
                'points': np.random.rand(100,3).astype(np.float32),
                'point_attr1': np.random.rand(100).astype(np.float32),
                } ]

            vis = ml3d.vis.Visualizer()
            vis.visualize(data)

    Args:
        data: A list of dictionaries. Each dictionary is a point cloud with
            attributes. Each dictionary must have the entries 'name' and
            'points'. Points and point attributes can be passed as numpy
            arrays, PyTorch tensors or TensorFlow tensors.
        lut: Optional lookup table for colors.
        bounding_boxes: Optional bounding boxes.
        width: window width.
        height: window height.
    """
    self._init_data(data)

    if lut is not None:
        self.set_lut("labels", lut)

    if bounding_boxes is not None:
        prefix = Model.bounding_box_prefix
        # Filament crashes if you have too many items, and anyway, hundreds
        # of items is unwieldy in a list. So combine items if we have too
        # many.
        group_size = int(math.floor(float(len(bounding_boxes)) / 100.0))
        if group_size < 2:
            # Few boxes: one tree entry per box.
            box_data = [
                Model.BoundingBoxData(prefix + str(bbox), [bbox])
                for bbox in bounding_boxes
            ]
        else:
            # Many boxes: chunk them into groups of `group_size` entries.
            box_data = []
            current_group = []
            n = len(bounding_boxes)
            for i in range(0, n):
                current_group.append(bounding_boxes[i])
                if len(current_group) >= group_size or i == n - 1:
                    if i < n - 1:
                        name = prefix + "Boxes " + str(
                            i + 1 - group_size) + " - " + str(i)
                    else:
                        # The last (possibly partial) group.
                        if len(current_group) > 1:
                            name = prefix + "Boxes " + str(
                                i + 1 - len(current_group)) + " - " + str(i)
                        else:
                            name = prefix + "Box " + str(i)
                    data = Model.BoundingBoxData(name, current_group)
                    box_data.append(data)
                    current_group = []
        self._objects.bounding_box_data = box_data
    else:
        self._consolidate_bounding_boxes = True
    self._visualize("3DTrans", width, height)
def _visualize(self, title, width, height):
    """Build the UI, load the geometries, and enter the GUI main loop.

    Geometry loading is asynchronous; the `on_done_ui` callback finishes the
    UI setup (bounding boxes, comboboxes, shader/datasource defaults, camera)
    once all geometries have been loaded.
    """
    gui.Application.instance.initialize()
    self._init_user_interface(title, width, height)

    self._3d.scene.downsample_threshold = 400000

    # Turn all the objects off except the first one
    for name, node in self._name2treenode.items():
        node.checkbox.checked = True
        self._3d.scene.show_geometry(name, False)
    for name in [self._objects.data_names[0]]:
        self._name2treenode[name].checkbox.checked = True
        self._3d.scene.show_geometry(name, True)
    self._on_display_tab_changed(0)
    self._on_start_animation()

    def on_done_ui():
        # Add bounding boxes here: bounding boxes belonging to the dataset
        # will not be loaded until now.
        self._update_bounding_boxes()

        self._update_datasource_combobox()
        self._update_shaders_combobox()

        # Display "colors" by default if available, "points" if not
        available_attrs = self._get_available_attrs()
        self._set_shader(self.SOLID_NAME, force_update=True)
        if "colors" in available_attrs:
            self._datasource_combobox.selected_text = "colors"
        elif "points" in available_attrs:
            self._datasource_combobox.selected_text = "points"

        # Suppress redundant geometry re-uploads while syncing widgets.
        self._dont_update_geometry = True
        self._on_datasource_changed(
            self._datasource_combobox.selected_text,
            self._datasource_combobox.selected_index)
        self._update_geometry_colors()
        self._dont_update_geometry = False
        # _datasource_combobox was empty, now isn't, re-layout.
        self.window.set_needs_layout()

        self._update_geometry()
        self.setup_camera()

    self._load_geometries(self._objects.data_names, on_done_ui)
    gui.Application.instance.run()
| 73,715
| 38.294243
| 129
|
py
|
3DTrans
|
3DTrans-master/tools/show_squence_demo/utils/components.py
|
import numpy as np
import open3d as o3d
from PIL import Image, ImageDraw
from colorsys import rgb_to_yiq
class LabelLUT:
    """The class to manage look-up table for assigning colors to labels."""

    class Label:
        """A single LUT entry: display name, numeric value and RGB color."""

        def __init__(self, name, value, color):
            self.name = name
            self.value = value
            self.color = color

    # Default palette cycled through when no explicit color is supplied.
    Colors = [[0., 0., 0.], [0.96078431, 0.58823529, 0.39215686],
              [0.96078431, 0.90196078, 0.39215686],
              [0.58823529, 0.23529412, 0.11764706],
              [0.70588235, 0.11764706, 0.31372549], [1., 0., 0.],
              [0.11764706, 0.11764706, 1.], [0.78431373, 0.15686275, 1.],
              [0.35294118, 0.11764706, 0.58823529], [1., 0., 1.],
              [1., 0.58823529, 1.], [0.29411765, 0., 0.29411765],
              [0.29411765, 0., 0.68627451], [0., 0.78431373, 1.],
              [0.19607843, 0.47058824, 1.], [0., 0.68627451, 0.],
              [0., 0.23529412,
               0.52941176], [0.31372549, 0.94117647, 0.58823529],
              [0.58823529, 0.94117647, 1.], [0., 0., 1.], [1.0, 1.0, 0.25],
              [0.5, 1.0, 0.25], [0.25, 1.0, 0.25], [0.25, 1.0, 0.5],
              [0.25, 1.0, 1.25], [0.25, 0.5, 1.25], [0.25, 0.25, 1.0],
              [0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.375, 0.375, 0.375],
              [0.5, 0.5, 0.5], [0.625, 0.625, 0.625], [0.75, 0.75, 0.75],
              [0.875, 0.875, 0.875]]

    def __init__(self, label_to_names=None):
        """
        Args:
            label_to_names: Initialize the colormap with this mapping from
                labels (int) to class names (str).
        """
        # Start partway into the palette (matches the original behavior).
        self._next_color = 10
        self.labels = {}
        if label_to_names is not None:
            for val in sorted(label_to_names.keys()):
                self.add_label(label_to_names[val], val)

    def add_label(self, name, value, color=None):
        """Adds a label to the table.

        Example:
            The following sample creates a LUT with 3 labels::

                lut = ml3d.vis.LabelLUT()
                lut.add_label('one', 1)
                lut.add_label('two', 2)
                lut.add_label('three', 3, [0,0,1]) # use blue for label 'three'

        Args:
            name: The label name as string.
            value: The value associated with the label.
            color: Optional RGB color. E.g., [0.2, 0.4, 1.0].
        """
        if color is None:
            # Take the next palette color, wrapping around at the end.
            if self._next_color >= len(self.Colors):
                self._next_color = 0
            color = self.Colors[self._next_color]
            self._next_color += 1
        # BUGFIX: an explicitly passed `color` used to be ignored (the old
        # else-branch re-read the palette); it is now used as documented.
        self.labels[value] = self.Label(name, value, color)

    @classmethod
    def get_colors(cls, name='default', mode=None):
        """Return full list of colors in the lookup table.

        Args:
            name (str): Name of lookup table colormap. Only 'default' is
                supported.
            mode (str): Colormap mode. May be None (return as is), 'lightbg' to
                move the dark colors earlier in the list or 'darkbg' to move
                them later in the list. This will provide better visual
                discrimination for the earlier classes.

        Returns:
            List of colors (R, G, B) in the LUT.
        """
        if mode is None:
            return cls.Colors
        # Split the palette by YIQ luma so one half is dark, the other light.
        dark_colors = list(
            filter(lambda col: rgb_to_yiq(*col)[0] < 0.5, cls.Colors))
        light_colors = list(
            filter(lambda col: rgb_to_yiq(*col)[0] >= 0.5, cls.Colors))
        if mode == 'lightbg':
            return dark_colors + light_colors
        if mode == 'darkbg':
            return light_colors + dark_colors
class BoundingBox3D:
    """Class that defines an axially-oriented bounding box."""

    next_id = 1  # class-wide counter used to auto-generate identifiers

    def __init__(self,
                 center,
                 front,
                 up,
                 left,
                 size,
                 label_class,
                 confidence,
                 meta=None,
                 show_class=False,
                 show_confidence=False,
                 show_meta=None,
                 meta_center=None,
                 identifier=None,
                 arrow_length=1.0):
        """Creates a bounding box.

        Front, up, left define the axis of the box and must be normalized and
        mutually orthogonal.

        Args:
            center: (x, y, z) that defines the center of the box.
            front: normalized (i, j, k) that defines the front direction of the
                box.
            up: normalized (i, j, k) that defines the up direction of the box.
            left: normalized (i, j, k) that defines the left direction of the
                box.
            size: (width, height, depth) that defines the size of the box, as
                measured from edge to edge.
            label_class: integer specifying the classification label. If an LUT
                is specified in create_lines() this will be used to determine
                the color of the box.
            confidence: confidence level of the box.
            meta: a user-defined string (optional).
            show_class: displays the class label in text near the box
                (optional).
            show_confidence: displays the confidence value in text near the box
                (optional).
            show_meta: displays the meta string in text near the box (optional).
            meta_center: optional (x, y, z) anchor point for the meta text.
            identifier: a unique integer that defines the id for the box
                (optional, will be generated if not provided).
            arrow_length: the length of the arrow in the front_direct. Set to
                zero to disable the arrow (optional).
        """
        assert (len(center) == 3)
        assert (len(front) == 3)
        assert (len(up) == 3)
        assert (len(left) == 3)
        assert (len(size) == 3)
        # BUGFIX: meta_center defaults to None, so validate it only when it
        # is actually supplied (the unconditional len() raised TypeError).
        if meta_center is not None:
            assert (len(meta_center) == 3)

        self.center = np.array(center, dtype="float32")
        self.front = np.array(front, dtype="float32")
        self.up = np.array(up, dtype="float32")
        self.left = np.array(left, dtype="float32")
        self.size = size
        self.label_class = label_class
        self.confidence = confidence
        self.meta = meta
        self.show_class = show_class
        self.show_confidence = show_confidence
        self.show_meta = show_meta
        self.meta_center = meta_center
        if identifier is not None:
            self.identifier = identifier
        else:
            self.identifier = "box:" + str(BoundingBox3D.next_id)
            BoundingBox3D.next_id += 1
        self.arrow_length = arrow_length

    def __repr__(self):
        s = str(self.identifier) + " (class=" + str(
            self.label_class) + ", conf=" + str(self.confidence)
        if self.meta is not None:
            s = s + ", meta=" + str(self.meta)
        s = s + ")"
        return s

    @staticmethod
    def create_lines(boxes, lut=None, out_format="lineset"):
        """Creates a LineSet that can be used to render the boxes.

        Args:
            boxes: the list of bounding boxes
            lut: a ml3d.vis.LabelLUT that is used to look up the color based on
                the label_class argument of the BoundingBox3D constructor. If
                not provided, a color of 50% grey will be used. (optional)
            out_format (str): Output format. Can be "lineset" (default) for the
                Open3D lineset or "dict" for a dictionary of lineset properties.

        Returns:
            For out_format == "lineset": open3d.geometry.LineSet
            For out_format == "dict": Dictionary of lineset properties
            ("vertex_positions", "line_indices", "line_colors", "bbox_labels",
            "bbox_confidences").
        """
        if out_format not in ('lineset', 'dict'):
            raise ValueError("Please specify an output_format of 'lineset' "
                             "(default) or 'dict'.")

        # 14 vertices per box: 8 corners, 2 arrow shaft endpoints, and 4
        # (currently collapsed) arrow-head points. 17 line segments per box.
        nverts = 14
        nlines = 17
        points = np.zeros((nverts * len(boxes), 3), dtype="float32")
        indices = np.zeros((nlines * len(boxes), 2), dtype="int32")
        colors = np.zeros((nlines * len(boxes), 3), dtype="float32")

        for i, box in enumerate(boxes):
            pidx = nverts * i
            x = 0.5 * box.size[0] * box.left
            y = 0.5 * box.size[1] * box.up
            z = 0.5 * box.size[2] * box.front
            arrow_tip = box.center + z - box.arrow_length * box.front

            # It seems to be substantially faster to assign directly for the
            # points, as opposed to points[pidx:pidx+nverts] = np.stack((...))
            points[pidx] = box.center + x + y + z
            points[pidx + 1] = box.center - x + y + z
            points[pidx + 2] = box.center - x + y - z
            points[pidx + 3] = box.center + x + y - z
            points[pidx + 4] = box.center + x - y + z
            points[pidx + 5] = box.center - x - y + z
            points[pidx + 6] = box.center - x - y - z
            points[pidx + 7] = box.center + x - y - z
            points[pidx + 8] = box.center + z
            points[pidx + 9] = arrow_tip
            # Arrow-head vertices are collapsed onto the tip (head drawing is
            # disabled in this fork, but the vertex/line layout is kept).
            points[pidx + 10] = arrow_tip
            points[pidx + 11] = arrow_tip
            points[pidx + 12] = arrow_tip
            points[pidx + 13] = arrow_tip

        # It is faster to break the indices and colors into their own loop.
        for i, box in enumerate(boxes):
            pidx = nverts * i
            idx = nlines * i
            indices[idx:idx +
                    nlines] = ((pidx, pidx + 1), (pidx + 1, pidx + 2),
                               (pidx + 2, pidx + 3), (pidx + 3, pidx),
                               (pidx + 4, pidx + 5), (pidx + 5, pidx + 6),
                               (pidx + 6, pidx + 7), (pidx + 7, pidx + 4),
                               (pidx + 0, pidx + 4), (pidx + 1, pidx + 5),
                               (pidx + 2, pidx + 6), (pidx + 3, pidx + 7),
                               (pidx + 8, pidx + 9), (pidx + 9, pidx + 10),
                               (pidx + 9, pidx + 11), (pidx + 9,
                                                       pidx + 12), (pidx + 9,
                                                                    pidx + 13))

            if lut is not None and box.label_class in lut.labels:
                label = lut.labels[box.label_class]
                c = (label.color[0], label.color[1], label.color[2])
            else:
                if box.confidence == -1.0:
                    c = (0., 1.0, 0.)  # GT: Green
                elif box.confidence >= 0 and box.confidence <= 1.0:
                    c = (1.0, 0., 0.)  # Prediction: red
                else:
                    c = (0.5, 0.5, 0.5)  # Grey

            colors[idx:idx +
                   nlines] = c  # copies c to each element in the range

        if out_format == "lineset":
            lines = o3d.geometry.LineSet()
            lines.points = o3d.utility.Vector3dVector(points)
            lines.lines = o3d.utility.Vector2iVector(indices)
            lines.colors = o3d.utility.Vector3dVector(colors)
        elif out_format == "dict":
            lines = {
                "vertex_positions": points,
                "line_indices": indices,
                "line_colors": colors,
                "bbox_labels": tuple(b.label_class for b in boxes),
                "bbox_confidences": tuple(b.confidence for b in boxes)
            }

        return lines

    @staticmethod
    def project_to_img(boxes, img, lidar2img_rt=None, lut=None):
        """Returns image with projected 3D bboxes

        Args:
            boxes: the list of bounding boxes
            img: an RGB image
            lidar2img_rt: 4x4 transformation from lidar frame to image plane.
                Defaults to the identity. (BUGFIX: the old default
                `np.ones(4)` was a mutable 1-D array and could never have
                produced a valid projection.)
            lut: a ml3d.vis.LabelLUT that is used to look up the color based on
                the label_class argument of the BoundingBox3D constructor. If
                not provided, a color of 50% grey will be used. (optional)
        """
        if lidar2img_rt is None:
            lidar2img_rt = np.eye(4)
        lines = BoundingBox3D.create_lines(boxes, lut, out_format="dict")
        points = lines["vertex_positions"]
        indices = lines["line_indices"]
        colors = lines["line_colors"]

        # Homogeneous lidar points -> image plane, with depth clamping.
        pts_4d = np.concatenate(
            [points.reshape(-1, 3),
             np.ones((len(boxes) * 14, 1))], axis=-1)
        pts_2d = pts_4d @ lidar2img_rt.T

        pts_2d[:, 2] = np.clip(pts_2d[:, 2], a_min=1e-5, a_max=1e5)
        pts_2d[:, 0] /= pts_2d[:, 2]
        pts_2d[:, 1] /= pts_2d[:, 2]

        imgfov_pts_2d = pts_2d[..., :2].reshape(len(boxes), 14, 2)
        indices_2d = indices[..., :2].reshape(len(boxes), 17, 2)
        colors_2d = colors[..., :3].reshape(len(boxes), 17, 3)

        return BoundingBox3D.plot_rect3d_on_img(img,
                                                len(boxes),
                                                imgfov_pts_2d,
                                                indices_2d,
                                                colors_2d,
                                                thickness=3)

    @staticmethod
    def plot_rect3d_on_img(img,
                           num_rects,
                           rect_corners,
                           line_indices,
                           color=None,
                           thickness=1):
        """Plot the boundary lines of 3D rectangular on 2D images.

        Args:
            img (numpy.array): The numpy array of image.
            num_rects (int): Number of 3D rectangulars.
            rect_corners (numpy.array): Coordinates of the corners of 3D
                rectangulars. Should be in the shape of [num_rect, 8, 2] or
                [num_rect, 14, 2] if counting arrows.
            line_indices (numpy.array): indicates connectivity of lines between
                rect_corners. Should be in the shape of [num_rect, 12, 2] or
                [num_rect, 17, 2] if counting arrows.
            color (tuple[int]): The color to draw bboxes. Default: (1.0, 1.0,
                1.0), i.e. white.
            thickness (int, optional): The thickness of bboxes. Default: 1.
        """
        img_pil = Image.fromarray(img)
        draw = ImageDraw.Draw(img_pil)
        if color is None:
            color = np.ones((line_indices.shape[0], line_indices.shape[1], 3))
        for i in range(num_rects):
            # BUGFIX: np.int was removed in NumPy >= 1.24; use builtin int.
            corners = rect_corners[i].astype(int)
            # ignore boxes outside a certain threshold
            interesting_corners_scale = 3.0
            if min(corners[:, 0]
                  ) < -interesting_corners_scale * img.shape[1] or max(
                      corners[:, 0]
                  ) > interesting_corners_scale * img.shape[1] or min(
                      corners[:, 1]
                  ) < -interesting_corners_scale * img.shape[0] or max(
                      corners[:, 1]) > interesting_corners_scale * img.shape[0]:
                continue
            for j, (start, end) in enumerate(line_indices[i]):
                c = tuple(color[i][j] * 255)  # TODO: not working
                c = (int(c[0]), int(c[1]), int(c[2]))
                # line_indices hold global (flattened) vertex ids in
                # [14*i, 14*i+13]; map them to this rect's 14 local corners.
                # Equivalent to the old `% (14 * i)` special-casing of i == 0,
                # but uniform.
                pt1 = (corners[start % 14, 0], corners[start % 14, 1])
                pt2 = (corners[end % 14, 0], corners[end % 14, 1])
                draw.line([pt1, pt2], fill=c, width=thickness)
        return np.array(img_pil).astype(np.uint8)
class Object3D(BoundingBox3D):
    """A yaw-parameterized detection box built on top of BoundingBox3D."""

    def __init__(self,
                 center,
                 size,
                 yaw,
                 name,
                 cls="",
                 arrow=0.,
                 score=0.,
                 id="",
                 text="",
                 thikness=1.5,
                 show_meta=False,
                 meta_center=None,
                 show_arrow=False):
        # Shift by -90 deg so yaw=0 matches the base-class axis convention.
        self.yaw = yaw - np.pi * 0.5
        cos_y = np.cos(self.yaw)
        sin_y = np.sin(self.yaw)
        left = [cos_y, sin_y, 0]
        front = [-sin_y, cos_y, 0]
        up = [0, 0, 1]
        self.score = score
        self.cls = cls
        self.name = name
        self.id = id
        self.thikness = thikness
        show_name = self.name
        if show_arrow is False:
            self.arrow = 0.
        elif arrow < 1.0:
            # Short/unspecified arrows default to a third of the box height.
            self.arrow = size[2] * 0.33
        else:
            self.arrow = arrow
        super().__init__(center, front, up, left, size,
                         label_class=show_name,
                         confidence=self.score,
                         meta=text,
                         show_class=False,
                         show_confidence=False,
                         show_meta=show_meta,
                         meta_center=meta_center,
                         identifier=None,
                         arrow_length=self.arrow)
class Colormap:
    """This class is used to create a color map for visualization of points."""

    class Point:
        """A single control point of the colormap.

        Args:
            value: The scalar position of the point, in [0, 1].
            color: The RGB color associated with the value.
        """

        def __init__(self, value, color):
            assert (value >= 0.0)
            assert (value <= 1.0)

            self.value = value
            self.color = color

        def __repr__(self):
            """Represent the color and value in the colormap."""
            return "Colormap.Point(" + str(self.value) + ", " + str(
                self.color) + ")"

    # The value of each Point must be greater than the previous
    # (e.g. [0.0, 0.1, 0.4, 1.0], not [0.0, 0.4, 0.1, 1.0]
    def __init__(self, points):
        self.points = points

    def calc_u_array(self, values, range_min, range_max):
        """Map `values` into [0, 1] relative to [range_min, range_max]."""
        range_width = (range_max - range_min)
        # BUGFIX: guard against a degenerate range (all values identical),
        # which previously raised ZeroDivisionError.
        if range_width <= 0.0:
            return [0.0 for _ in values]
        return [
            min(1.0, max(0.0, (v - range_min) / range_width)) for v in values
        ]

    # (This is done by the shader now)
    def calc_color_array(self, values, range_min, range_max):
        """Generate the color array based on the minimum and maximum range passed.

        Args:
            values: The index of values.
            range_min: The minimum value in the range.
            range_max: The maximum value in the range.

        Returns:
            An array of color index based on the range passed.
        """
        u_array = self.calc_u_array(values, range_min, range_max)

        # Rasterize the control points into a fixed 128-entry texture,
        # linearly interpolating between neighboring points.
        tex = [[1.0, 0.0, 1.0]] * 128
        n = float(len(tex) - 1)
        idx = 0
        for tex_idx in range(0, len(tex)):
            x = float(tex_idx) / n
            while idx < len(self.points) and x > self.points[idx].value:
                idx += 1

            if idx == 0:
                tex[tex_idx] = self.points[0].color
            elif idx == len(self.points):
                tex[tex_idx] = self.points[-1].color
            else:
                p0 = self.points[idx - 1]
                p1 = self.points[idx]
                dist = p1.value - p0.value
                # Calc weights between 0 and 1
                w0 = 1.0 - (x - p0.value) / dist
                w1 = (x - p0.value) / dist
                c = [
                    w0 * p0.color[0] + w1 * p1.color[0],
                    w0 * p0.color[1] + w1 * p1.color[1],
                    w0 * p0.color[2] + w1 * p1.color[2]
                ]
                tex[tex_idx] = c

        return [tex[int(u * n)] for u in u_array]

    # These are factory methods rather than class objects because
    # the user may modify the colormaps that are used.
    @staticmethod
    def make_greyscale():
        """Generate a greyscale colormap."""
        return Colormap([
            Colormap.Point(0.0, [0.0, 0.0, 0.0]),
            Colormap.Point(1.0, [1.0, 1.0, 1.0])
        ])

    @staticmethod
    def make_rainbow():
        """Generate the rainbow color array."""
        return Colormap([
            Colormap.Point(0.000, [0.0, 0.0, 1.0]),
            Colormap.Point(0.125, [0.0, 0.5, 1.0]),
            Colormap.Point(0.250, [0.0, 1.0, 1.0]),
            Colormap.Point(0.375, [0.0, 1.0, 0.5]),
            Colormap.Point(0.500, [0.0, 1.0, 0.0]),
            Colormap.Point(0.625, [0.5, 1.0, 0.0]),
            Colormap.Point(0.750, [1.0, 1.0, 0.0]),
            Colormap.Point(0.875, [1.0, 0.5, 0.0]),
            Colormap.Point(1.000, [1.0, 0.0, 0.0])
        ])
| 21,218
| 39.649425
| 85
|
py
|
3DTrans
|
3DTrans-master/tools/show_squence_demo/utils/__init__.py
|
from .gui import *
| 19
| 9
| 18
|
py
|
3DTrans
|
3DTrans-master/tools/ssl_utils/semi_train_utils.py
|
import glob
import os
import torch
import tqdm
from torch.nn.utils import clip_grad_norm_
from .sess import sess
from .pseudo_label import pseudo_label
from .iou_match_3d import iou_match_3d
from .se_ssd import se_ssd
# Registry mapping ssl_cfg.NAME to the corresponding semi-supervised
# training function (used by train_ssl_one_epoch).
semi_learning_methods = {
    'SESS': sess,
    'Pseudo-Label': pseudo_label,
    '3DIoUMatch': iou_match_3d,
    'SE_SSD': se_ssd,
}
def train_ssl_one_epoch(teacher_model, student_model, optimizer, labeled_loader, unlabeled_loader, epoch_id, lr_scheduler, accumulated_iter, ssl_cfg,
                        rank, tbar, total_it_each_epoch, labeled_loader_iter, unlabeled_loader_iter, tb_log=None, leave_pbar=False, dist=False):
    """Run one epoch of teacher/student semi-supervised training.

    Each iteration draws one labeled and one unlabeled batch, dispatches to
    the SSL method configured in ssl_cfg.NAME, steps the student optimizer,
    and (optionally) EMA-updates the teacher from the student.

    Returns:
        The updated global iteration counter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)

    for cur_it in range(total_it_each_epoch):
        # Restart exhausted loader iterators so epochs are sized by the
        # labeled set regardless of the unlabeled set's length.
        try:
            ud_teacher_batch_dict, ud_student_batch_dict = next(unlabeled_loader_iter)
        except StopIteration:
            unlabeled_loader_iter = iter(unlabeled_loader)
            ud_teacher_batch_dict, ud_student_batch_dict = next(unlabeled_loader_iter)
        try:
            ld_teacher_batch_dict, ld_student_batch_dict = next(labeled_loader_iter)
        except StopIteration:
            labeled_loader_iter = iter(labeled_loader)
            ld_teacher_batch_dict, ld_student_batch_dict = next(labeled_loader_iter)

        # lr_scheduler.step(accumulated_iter)

        # NOTE(review): bare except — some optimizer wrappers expose .lr,
        # plain torch optimizers expose param_groups; narrow if possible.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']

        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)

        optimizer.zero_grad()
        # Dispatch to the configured SSL method (SESS / Pseudo-Label / ...).
        loss, tb_dict, disp_dict = semi_learning_methods[ssl_cfg.NAME](
            teacher_model, student_model,
            ld_teacher_batch_dict, ld_student_batch_dict,
            ud_teacher_batch_dict, ud_student_batch_dict,
            ssl_cfg, epoch_id, dist
        )
        loss.backward()
        clip_grad_norm_(student_model.parameters(), ssl_cfg.STUDENT.GRAD_NORM_CLIP)
        optimizer.step()
        lr_scheduler.step(accumulated_iter)

        accumulated_iter += 1
        disp_dict.update({'loss': loss.item(), 'lr': cur_lr})

        # EMA teacher update: no update before EMA_EPOCH[0], ramped momentum
        # in [EMA_EPOCH[0], EMA_EPOCH[1]), fixed momentum afterwards.
        if ssl_cfg.TEACHER.NUM_ITERS_PER_UPDATE != -1:
            ema_rampup_start, ema_start = ssl_cfg.TEACHER.EMA_EPOCH
            assert ema_rampup_start <= ema_start
            if epoch_id < ema_rampup_start:
                pass
            elif (epoch_id >= ema_rampup_start) and (epoch_id < ema_start):
                if accumulated_iter % ssl_cfg.TEACHER.NUM_ITERS_PER_UPDATE == 0:
                    if dist:
                        # if rank == 0:
                        # DDP wraps the network in .module.onepass.
                        update_ema_variables(student_model.module.onepass, teacher_model.module.onepass, ssl_cfg.TEACHER.RAMPUP_EMA_MOMENTUM, accumulated_iter)
                    else:
                        update_ema_variables(student_model, teacher_model, ssl_cfg.TEACHER.RAMPUP_EMA_MOMENTUM, accumulated_iter)
            elif epoch_id >= ema_start:
                if accumulated_iter % ssl_cfg.TEACHER.NUM_ITERS_PER_UPDATE == 0:
                    if dist:
                        # if rank == 0:
                        update_ema_variables_with_fixed_momentum(student_model.module.onepass, teacher_model.module.onepass, ssl_cfg.TEACHER.EMA_MOMENTUM)
                    else:
                        update_ema_variables_with_fixed_momentum(student_model, teacher_model, ssl_cfg.TEACHER.EMA_MOMENTUM)
            else:
                raise Exception('Impossible condition for EMA update')

        # log to console and tensorboard
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()

            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_ssl_model(teacher_model, student_model, student_optimizer, labeled_loader, unlabeled_loader,
                    lr_scheduler, ssl_cfg,
                    start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                    labeled_sampler, unlabeled_sampler,
                    lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                    merge_all_iters_to_one_epoch=False, dist=False):
    """Top-level semi-supervised training loop.

    Runs train_ssl_one_epoch for every epoch in [start_epoch, total_epochs)
    and periodically saves student and teacher checkpoints under
    ckpt_save_dir/student and ckpt_save_dir/teacher (rank 0 only).

    NOTE(review): max_ckpt_save_num is currently unused — old checkpoints are
    never pruned; confirm whether pruning is expected here.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(labeled_loader)  # total iterations set to labeled set
        assert merge_all_iters_to_one_epoch is False
        labeled_loader_iter = iter(labeled_loader)
        unlabeled_loader_iter = iter(unlabeled_loader)
        for cur_epoch in tbar:
            # Reshuffle distributed samplers every epoch.
            if labeled_sampler is not None:
                labeled_sampler.set_epoch(cur_epoch)
            if unlabeled_sampler is not None:
                unlabeled_sampler.set_epoch(cur_epoch)

            # train one epoch; use the warmup scheduler during warmup epochs.
            if lr_warmup_scheduler is not None and cur_epoch < ssl_cfg.STUDENT.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_ssl_one_epoch(
                teacher_model = teacher_model,
                student_model = student_model,
                optimizer = student_optimizer,
                labeled_loader = labeled_loader,
                unlabeled_loader = unlabeled_loader,
                epoch_id = cur_epoch,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, ssl_cfg=ssl_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                labeled_loader_iter=labeled_loader_iter,
                unlabeled_loader_iter=unlabeled_loader_iter,
                dist = dist
            )

            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                student_ckpt_name = ckpt_save_dir / 'student' / ('checkpoint_epoch_%d' % trained_epoch)
                if dist:
                    # DDP wraps the network in .module.onepass; save the bare model.
                    save_checkpoint(
                        checkpoint_state(student_model.module.onepass, student_optimizer, trained_epoch, accumulated_iter), filename=student_ckpt_name,
                    )
                else:
                    save_checkpoint(
                        checkpoint_state(student_model, student_optimizer, trained_epoch, accumulated_iter), filename=student_ckpt_name,
                    )
                teacher_ckpt_name = ckpt_save_dir / 'teacher' / ('checkpoint_epoch_%d' % trained_epoch)
                if dist:
                    save_checkpoint(
                        checkpoint_state(teacher_model.module.onepass, student_optimizer, trained_epoch, accumulated_iter), filename=teacher_ckpt_name,
                    )
                else:
                    save_checkpoint(
                        checkpoint_state(teacher_model, student_optimizer, trained_epoch, accumulated_iter), filename=teacher_ckpt_name,
                    )
def model_state_to_cpu(model_state):
    """Return a copy of `model_state` with every tensor moved to CPU memory.

    The mapping type of the input (e.g. OrderedDict) is preserved so the
    result can be fed back to load_state_dict unchanged.
    """
    cpu_state = type(model_state)()  # keep the (ordered) mapping type
    for name, tensor in model_state.items():
        cpu_state[name] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Build a serializable checkpoint dict.

    Args:
        model: Optional nn.Module (DDP-wrapped models are unwrapped and
            moved to CPU so the checkpoint loads anywhere).
        optimizer: Optional optimizer whose state_dict is stored.
        epoch: Epoch counter to record.
        it: Global iteration counter to record.

    Returns:
        Dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None

    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # BUGFIX: the bare `except:` here used to swallow everything
        # (including KeyboardInterrupt); only tolerate pcdet being absent
        # or lacking __version__.
        version = 'none'

    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize `state` to '<filename>.pth' via torch.save.

    Args:
        state: Checkpoint dict (see checkpoint_state()).
        filename: Output path WITHOUT the '.pth' extension; it is appended
            here.
    """
    # A permanently disabled (`if False and ...`) branch that wrote the
    # optimizer state to a separate '<filename>_optim.pth' file was removed
    # as dead code; behavior is unchanged.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
def update_ema_variables(model, ema_model, alpha, global_step):
    """EMA-update `ema_model` parameters from `model` with ramped momentum.

    Uses the true average until the exponential average is more correct:
    the effective momentum is min(1 - 1/(global_step + 2), alpha), so early
    steps copy more of the student into the teacher.

    Args:
        model: Source (student) module.
        ema_model: Target (teacher) module, updated in place.
        alpha: Upper bound on the EMA momentum.
        global_step: Global iteration counter driving the ramp-up.
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 2), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # BUGFIX: `Tensor.add_(scalar, tensor)` is a removed legacy overload
        # in modern PyTorch; use the keyword `alpha` form instead.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def update_ema_variables_with_fixed_momentum(model, ema_model, alpha):
    """EMA-update `ema_model` parameters from `model` with fixed momentum.

    ema = alpha * ema + (1 - alpha) * param, applied in place for every
    parameter pair.

    Args:
        model: Source (student) module.
        ema_model: Target (teacher) module, updated in place.
        alpha: Fixed EMA momentum in [0, 1].
    """
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # BUGFIX: `Tensor.add_(scalar, tensor)` is a removed legacy overload
        # in modern PyTorch; use the keyword `alpha` form instead.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
| 9,752
| 42.346667
| 159
|
py
|
3DTrans
|
3DTrans-master/tools/ssl_utils/semi_utils.py
|
import torch
import numpy as np
from pcdet.models.model_utils import model_nms_utils
try:
import kornia
except:
pass
def load_data_to_gpu(batch_dict):
    """Move the numpy entries of `batch_dict` to the GPU, in place.

    Non-array values and metadata keys are left untouched; images are
    converted via kornia's image_to_tensor, image shapes become int CUDA
    tensors, and every other array becomes a float CUDA tensor.
    """
    for key, val in batch_dict.items():
        if not isinstance(val, np.ndarray):
            continue
        elif key in ['frame_id', 'metadata', 'calib']:
            # Metadata stays on the host.
            continue
        elif key in ['images']:
            # kornia converts HWC images to CHW float tensors.
            batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous()
        elif key in ['image_shape']:
            batch_dict[key] = torch.from_numpy(val).int().cuda()
        elif key in ['db_flag']:
            continue
        else:
            batch_dict[key] = torch.from_numpy(val).float().cuda()
"""
Reverse augmentation transform
"""
def random_world_flip(box_preds, params, reverse=False):
    """Apply (or undo) world-frame axis flips to boxes [x, y, z, ..., yaw].

    `params` lists the flipped axes: 'x' mirrors y (negating y and yaw),
    'y' mirrors x (negating x and shifting yaw by pi). When reversing, the
    axes are applied in the opposite order. Boxes are modified in place and
    also returned.
    """
    def _flip_about_x(boxes):
        # Mirror across the x axis: negate y and yaw.
        boxes[:, 1] = -boxes[:, 1]
        boxes[:, 6] = -boxes[:, 6]

    def _flip_about_y(boxes):
        # Mirror across the y axis: negate x and rotate yaw by pi.
        boxes[:, 0] = -boxes[:, 0]
        boxes[:, 6] = -(boxes[:, 6] + np.pi)

    if reverse:
        if 'y' in params:
            _flip_about_y(box_preds)
        if 'x' in params:
            _flip_about_x(box_preds)
    else:
        if 'x' in params:
            _flip_about_x(box_preds)
        if 'y' in params:
            _flip_about_y(box_preds)
    return box_preds
def random_world_rotation(box_preds, params, reverse=False):
    """Rotate boxes about the world z-axis by `params` radians, in place.

    With reverse=True the inverse rotation (-params) is applied. Centers are
    rotated and the yaw (column 6) is shifted by the same angle; the boxes
    tensor is returned for chaining.
    """
    noise_rotation = -params if reverse else params
    angle = torch.tensor([noise_rotation]).to(box_preds.device)
    cosa = torch.cos(angle)
    sina = torch.sin(angle)
    zeros = angle.new_zeros(1)
    ones = angle.new_ones(1)
    # Row-major rotation matrix for row-vector points: p' = p @ R.
    rot_matrix = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).reshape(3, 3).float()
    box_preds[:, :3] = torch.matmul(box_preds[:, :3], rot_matrix)
    box_preds[:, 6] += noise_rotation
    return box_preds
def random_world_scaling(box_preds, params, reverse = False):
    """Scale box centers and sizes by a global factor, in place.

    Args:
        box_preds: (N, 7+) tensor; columns 0..5 (center + size) are scaled,
            the yaw (column 6) is left untouched.
        params: scale factor.
        reverse: divide instead of multiply, undoing a forward scaling.

    Returns:
        The same (mutated) ``box_preds`` tensor.
    """
    factor = (1.0 / params) if reverse else params
    box_preds[:, :6] *= factor
    return box_preds
@torch.no_grad()
def reverse_transform(teacher_boxes, teacher_dict, student_dict):
    """Map teacher predictions into the student's augmented frame, in place.

    For each sample, the teacher's world augmentations are undone (walked
    in reverse order of application) and the student's augmentations are
    then re-applied, so teacher boxes can supervise the student directly.

    Args:
        teacher_boxes: list of per-sample dicts containing 'pred_boxes'.
        teacher_dict: batch dict with 'augmentation_list' and
            'augmentation_params' describing the teacher's augmentations.
        student_dict: same structure for the student's augmentations.

    Returns:
        ``teacher_boxes`` with each 'pred_boxes' rewritten in place.
    """
    aug_dispatch = {
        'random_world_flip': random_world_flip,
        'random_world_rotation': random_world_rotation,
        'random_world_scaling': random_world_scaling
    }
    for sample_idx, box_dict in enumerate(teacher_boxes):
        preds = box_dict['pred_boxes']
        teacher_params = teacher_dict['augmentation_params'][sample_idx]
        student_params = student_dict['augmentation_params'][sample_idx]
        # Undo teacher augmentations, last applied first.
        for name in teacher_dict['augmentation_list'][sample_idx][::-1]:
            preds = aug_dispatch[name](preds, teacher_params[name], reverse=True)
        # Re-apply the student's augmentations in forward order.
        for name in student_dict['augmentation_list'][sample_idx]:
            preds = aug_dispatch[name](preds, student_params[name], reverse=False)
        box_dict['pred_boxes'] = preds
    return teacher_boxes
"""
Filter predicted boxes with conditions
"""
def _select(keep, boxes, labels, cls_preds, max_scores):
    """Apply one index/mask selection to all four per-box tensors at once."""
    return boxes[keep], labels[keep], cls_preds[keep], max_scores[keep]


def filter_boxes(batch_dict, cfgs):
    """Decode raw network outputs into filtered per-sample prediction dicts.

    For every sample in the batch: sigmoid-normalizes class scores when
    needed, derives 1-based labels, then applies the filters enabled in
    ``cfgs`` (class-agnostic NMS, score threshold, top-k) and finally drops
    boxes with any zero extent.

    Args:
        batch_dict: network output containing 'batch_size',
            'batch_box_preds', 'batch_cls_preds', 'cls_preds_normalized'
            and optionally 'batch_index', 'has_class_labels' plus a label
            tensor ('roi_labels' or 'batch_pred_labels').
        cfgs: config exposing FILTER_BY_NMS / FILTER_BY_SCORE_THRESHOLD /
            FILTER_BY_TOPK switches and their parameters.

    Returns:
        list of per-sample dicts with 'pred_boxes', 'pred_cls_preds',
        'pred_labels'.
    """
    batch_size = batch_dict['batch_size']
    pred_dicts = []
    for index in range(batch_size):
        if batch_dict.get('batch_index', None) is not None:
            # flat (N, ...) layout: rows are tagged with their sample index
            assert batch_dict['batch_box_preds'].shape.__len__() == 2
            batch_mask = (batch_dict['batch_index'] == index)
        else:
            # dense (B, N, ...) layout: slice out this sample
            assert batch_dict['batch_box_preds'].shape.__len__() == 3
            batch_mask = index
        box_preds = batch_dict['batch_box_preds'][batch_mask]
        cls_preds = batch_dict['batch_cls_preds'][batch_mask]
        if not batch_dict['cls_preds_normalized']:
            cls_preds = torch.sigmoid(cls_preds)
        max_cls_preds, label_preds = torch.max(cls_preds, dim=-1)
        if batch_dict.get('has_class_labels', False):
            label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
            # NOTE(review): indexes by sample id, not batch_mask — confirm
            # this is intended for the flat (batch_index) layout as well.
            label_preds = batch_dict[label_key][index]
        else:
            label_preds = label_preds + 1  # argmax is 0-based, labels are 1-based
        final_boxes = box_preds
        final_labels = label_preds
        final_cls_preds = cls_preds
        if cfgs.get('FILTER_BY_NMS', False):
            selected, selected_scores = model_nms_utils.class_agnostic_nms(
                box_scores=max_cls_preds, box_preds=final_boxes,
                nms_config=cfgs.NMS.NMS_CONFIG,
                score_thresh=cfgs.NMS.SCORE_THRESH
            )
            final_boxes, final_labels, final_cls_preds, max_cls_preds = _select(
                selected, final_boxes, final_labels, final_cls_preds, max_cls_preds)
        if cfgs.get('FILTER_BY_SCORE_THRESHOLD', False):
            selected = max_cls_preds > cfgs.SCORE_THRESHOLD
            final_boxes, final_labels, final_cls_preds, max_cls_preds = _select(
                selected, final_boxes, final_labels, final_cls_preds, max_cls_preds)
        if cfgs.get('FILTER_BY_TOPK', False):
            topk = min(max_cls_preds.shape[0], cfgs.TOPK)
            selected = torch.topk(max_cls_preds, topk)[1]
            final_boxes, final_labels, final_cls_preds, max_cls_preds = _select(
                selected, final_boxes, final_labels, final_cls_preds, max_cls_preds)
        # Drop degenerate boxes: any of dx/dy/dz equal to zero.
        zero_mask = (final_boxes[:, 3:6] != 0).all(1)
        final_boxes = final_boxes[zero_mask]
        final_labels = final_labels[zero_mask]
        final_cls_preds = final_cls_preds[zero_mask]
        record_dict = {
            'pred_boxes': final_boxes,
            'pred_cls_preds': final_cls_preds,
            'pred_labels': final_labels
        }
        pred_dicts.append(record_dict)
    return pred_dicts
"""
Generate gt_boxes in data_dict with prediction
"""
@torch.no_grad()
def construct_pseudo_label(boxes):
    """Pack per-sample predicted boxes into one zero-padded gt_boxes tensor.

    Args:
        boxes: list (one entry per sample) of dicts with 'pred_boxes'
            (N, 7) and 'pred_labels' (N,) tensors.

    Returns:
        (batch_size, max_num_gt, 8) tensor; each row holds the 7 box
        parameters followed by the class label as a float, zero-padded
        per sample up to the largest box count in the batch.
    """
    box_list = []
    num_gt_list = []
    # Build per-sample (N, 8) tensors: box params + label column.
    for box in boxes:
        box_preds = box['pred_boxes']
        label_preds = box['pred_labels'].float().unsqueeze(-1)
        num_gt_list.append(box_preds.shape[0])
        box_list.append(torch.cat([box_preds, label_preds], dim=1))
    batch_size = len(boxes)
    num_max_gt = max(num_gt_list)
    gt_boxes = box_list[0].new_zeros((batch_size, num_max_gt, 8))
    for bs_idx, (num_gt, sample_boxes) in enumerate(zip(num_gt_list, box_list)):
        gt_boxes[bs_idx, :num_gt, :] = sample_boxes
    return gt_boxes
| 7,199
| 34.46798
| 91
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.