# neural-splines | neural-splines-main/fit.py
import argparse
import numpy as np
import point_cloud_utils as pcu
import torch
from skimage.measure import marching_cubes
from neural_splines import load_point_cloud, fit_model_to_pointcloud, eval_model_on_grid, point_cloud_bounding_box
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument("input_point_cloud", type=str, help="Path to the input point cloud to reconstruct.")
argparser.add_argument("num_nystrom_samples", type=int, default=-1,
help="Number of Nyström samples to use for kernel ridge regression. "
"If negative, don't use Nyström sampling."
"This is the number of basis centers to use to represent the final function. "
"If this value is too small, the reconstruction can miss details in the input. "
"Values between 10-100 times sqrt(N) (where N = number of input points) are "
"generally good depending on the complexity of the input shape.")
argparser.add_argument("grid_size", type=int,
help="When reconstructing the mesh, use this many voxels along the longest side of the "
"bounding box. Default is 128.")
argparser.add_argument("--trim", type=float, default=-1.0,
help="If set to a positive value, trim vertices of the reconstructed mesh whose nearest "
"point in the input is greater than this value. The units of this argument are voxels "
"(where the grid_size determines the size of a voxel) Default is -1.0.")
argparser.add_argument("--eps", type=float, default=0.05,
help="Perturbation amount for finite differencing in voxel units. i.e. we perturb points by "
"eps times the diagonal length of a voxel "
"(where the grid_size determines the size of a voxel). "
"To approximate the gradient of the function, we sample points +/- eps "
"along the normal direction.")
argparser.add_argument("--scale", type=float, default=1.1,
help="Reconstruct the surface in a bounding box whose diameter is --scale times bigger than"
" the diameter of the bounding box of the input points. Defaults is 1.1.")
argparser.add_argument("--regularization", type=float, default=1e-10,
help="Regularization penalty for kernel ridge regression. Default is 1e-10.")
argparser.add_argument("--nystrom-mode", type=str, default="blue-noise",
help="How to generate nystrom samples. Default is 'k-means'. Must be one of "
"(1) 'random': choose Nyström samples at random from the input, "
"(2) 'blue-noise': downsample the input with blue noise to get Nyström samples, or "
"(3) 'k-means': use k-means clustering to generate Nyström samples. "
"Default is 'blue-noise'")
argparser.add_argument("--voxel-downsample-threshold", type=int, default=150_000,
help="If the number of input points is greater than this value, downsample it by "
"averaging points and normals within voxels on a grid. The size of the voxel grid is "
"determined via the --grid-size argument. Default is 150_000."
"NOTE: This can massively speed up reconstruction for very large point clouds and "
"generally won't throw away any details.")
argparser.add_argument("--kernel", type=str, default="neural-spline",
help="Which kernel to use. Must be one of 'neural-spline', 'spherical-laplace', or "
"'linear-angle'. Default is 'neural-spline'."
"NOTE: The spherical laplace is a good approximation to the neural tangent kernel"
"(see https://arxiv.org/pdf/2007.01580.pdf for details)")
argparser.add_argument("--seed", type=int, default=-1, help="Random number generator seed to use.")
argparser.add_argument("--out", type=str, default="recon.ply", help="Path to file to save reconstructed mesh in.")
argparser.add_argument("--save-grid", action="store_true",
help="If set, save the function evaluated on a voxel grid to {out}.grid.npy "
"where out is the value of the --out argument.")
argparser.add_argument("--save-points", action="store_true",
help="If set, save the tripled input points, their occupancies, and the Nyström samples "
"to an npz file named {out}.pts.npz where out is the value of the --out argument.")
argparser.add_argument("--cg-max-iters", type=int, default=20,
help="Maximum number of conjugate gradient iterations. Default is 20.")
argparser.add_argument("--cg-stop-thresh", type=float, default=1e-5,
help="Stop threshold for the conjugate gradient algorithm. Default is 1e-5.")
argparser.add_argument("--dtype", type=str, default="float64",
help="Scalar type of the data. Must be one of 'float32' or 'float64'. "
"Warning: float32 may not work very well for complicated inputs.")
argparser.add_argument("--outer-layer-variance", type=float, default=0.001,
help="Variance of the outer layer of the neural network from which the neural "
"spline kernel arises from. Default is 0.001.")
argparser.add_argument("--use-abs-units", action="store_true",
help="If set, then use absolute units instead of voxel units for --eps and --trim.")
argparser.add_argument("--verbose", action="store_true", help="Spam your terminal with debug information")
args = argparser.parse_args()
if args.dtype == "float64":
dtype = torch.float64
elif args.dtype == "float32":
dtype = torch.float32
else:
raise ValueError(f"invalid --dtype argument. Must be one of 'float32' or 'float64' but got {args.dtype}")
if args.seed > 0:
seed = args.seed
else:
seed = np.random.randint(2 ** 32 - 1)
torch.manual_seed(seed)
np.random.seed(seed)
print("Using random seed", seed)
x, n = load_point_cloud(args.input_point_cloud, dtype=dtype)
scaled_bbox = point_cloud_bounding_box(x, args.scale)
out_grid_size = torch.round(scaled_bbox[1] / scaled_bbox[1].max() * args.grid_size).to(torch.int32)
voxel_size = scaled_bbox[1] / out_grid_size # size of one voxel
# Downsample points to grid resolution if there are enough points
if x.shape[0] > args.voxel_downsample_threshold:
print("Downsampling input point cloud to voxel resolution.")
x, n, _ = pcu.downsample_point_cloud_voxel_grid(voxel_size, x.numpy(), n.numpy(),
min_bound=scaled_bbox[0],
max_bound=scaled_bbox[0] + scaled_bbox[1])
x, n = torch.from_numpy(x), torch.from_numpy(n)
# Finite differencing epsilon in world units
if args.use_abs_units:
eps_world_coords = args.eps
else:
eps_world_coords = args.eps * torch.norm(voxel_size).item()
model, tx = fit_model_to_pointcloud(x, n, num_ny=args.num_nystrom_samples, eps=eps_world_coords,
kernel=args.kernel, reg=args.regularization, ny_mode=args.nystrom_mode,
cg_max_iters=args.cg_max_iters, cg_stop_thresh=args.cg_stop_thresh,
outer_layer_variance=args.outer_layer_variance)
recon = eval_model_on_grid(model, scaled_bbox, tx, out_grid_size)
    v, f, vn, _ = marching_cubes(recon.numpy(), level=0.0, spacing=voxel_size)  # vn: mesh vertex normals (don't shadow the input normals n)
v += scaled_bbox[0].numpy() + 0.5 * voxel_size.numpy()
# Possibly trim regions which don't contain samples
if args.trim > 0.0:
# Trim distance in world coordinates
if args.use_abs_units:
trim_dist_world = args.trim
else:
trim_dist_world = args.trim * torch.norm(voxel_size).item()
nn_dist, _ = pcu.k_nearest_neighbors(v, x.numpy(), k=2)
nn_dist = nn_dist[:, 1]
f_mask = np.stack([nn_dist[f[:, i]] < trim_dist_world for i in range(f.shape[1])], axis=-1)
f_mask = np.all(f_mask, axis=-1)
f = f[f_mask]
    pcu.save_mesh_vfn(args.out, v.astype(np.float32), f.astype(np.int32), vn.astype(np.float32))
if args.save_grid:
np.savez(args.out + ".grid", grid=recon.detach().cpu().numpy(), bbox=[b.numpy() for b in scaled_bbox])
    if args.save_points:
        x_ny = model.ny_points_[:, :3] if model.ny_points_ is not None else None
        save_dict = dict(x=x.detach().cpu().numpy(),
                         n=n.detach().cpu().numpy(),
                         eps=args.eps)
        if x_ny is not None:  # ny_points_ is None when centers are chosen uniformly at random
            save_dict["x_ny"] = x_ny.detach().cpu().numpy()
        np.savez(args.out + ".pts", **save_dict)
if __name__ == "__main__":
main()
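# Example invocation (a sketch; the input file name is hypothetical):
#   python fit.py bunny.ply 1024 128 --trim 2.0 --out bunny_recon.ply
# This fits a Neural Spline with 1024 Nyström samples, extracts the surface on a
# grid with 128 voxels along the longest side of the bounding box, and trims
# faces further than 2 voxel diagonals from the input points.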
# neural-splines | neural-splines-main/neural_splines/falkon_kernels.py
import functools
from abc import ABC
from typing import Optional
import cupy as cp
import numpy as np
import torch
from falkon.kernels import Kernel, KeopsKernelMixin
from falkon.options import FalkonOptions
from falkon.sparse.sparse_tensor import SparseTensor
from torch.utils.dlpack import to_dlpack
def _extract_float(d):
if isinstance(d, torch.Tensor):
try:
# tensor.item() works if tensor is a scalar, otherwise it throws
# a value error.
return d.item()
except ValueError:
raise ValueError("Item is not a scalar")
else:
try:
return float(d)
except TypeError:
raise TypeError("Item must be a scalar or a tensor.")
class NeuralSplineKernel(Kernel, KeopsKernelMixin, ABC):
kernel_type = "angle"
def __init__(self, variance: float = 1.0, opt: Optional[FalkonOptions] = None):
super().__init__("NeuralSpline", self.kernel_type, opt)
self.debug = opt.debug if opt is not None else False
self.variance = _extract_float(variance)
def extra_mem(self):
return {
# We transpose X2 in _apply
'nd': 0,
'md': 1,
# Norm results in prepare
'm': 0,
'n': 0,
# We do a copy in _apply
'nm': 1,
}
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt):
if self.debug:
print(f"NeuralSpline._keops_mmv_impl(X1={X1.shape}, X2={X2.shape}, v, kernel, out, opt)")
theta = 'two * Atan2(Norm2(Norm2(Y) * X - Norm2(X) * Y), Norm2(Norm2(Y) * X + Norm2(X) * Y))'
norm_xy = '(Norm2(X) * Norm2(Y))'
j01 = f'({norm_xy} * (Sin({theta}) + (one + variance) * (pi - {theta}) * Cos({theta})))'
formula = f'({j01} / pi) * v'
aliases = [
'X = Vi(%d)' % (X1.shape[1]),
'Y = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'pi = Pm(1)',
'variance = Pm(1)',
'one = Pm(1)',
'two = Pm(1)'
]
other_vars = [torch.tensor([np.pi]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([self.variance]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([1.0]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([2.0]).to(dtype=X1.dtype, device=X1.device)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def _decide_mmv_impl(self, X1, X2, v, opt):
if self.keops_can_handle_mmv(X1, X2, v, opt):
return self._keops_mmv_impl
else:
return super()._decide_mmv_impl(X1, X2, v, opt)
def _decide_dmmv_impl(self, X1, X2, v, w, opt):
if self.keops_can_handle_dmmv(X1, X2, v, w, opt):
return functools.partial(self.keops_dmmv_helper, mmv_fn=self._keops_mmv_impl)
else:
return super()._decide_dmmv_impl(X1, X2, v, w, opt)
def _prepare(self, X1, X2, **kwargs):
if self.debug:
print(f"NeuralSpline._prepare(X1={X1.shape}, X2={X2.shape}, *kwargs)")
return []
def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor):
raise NotImplementedError("NeuralSpline does not implement sparse prepare")
def _apply(self, X1: torch.Tensor, X2: torch.Tensor, out: torch.Tensor):
if self.debug:
print(f"NeuralSpline._apply(X1={X1.shape}, X2={X2.shape}, out={out.shape})")
kernel_code = r'''
#define PI (DTYPE) (3.1415926535897932384626433832795028841971693993751058209749445923078164062)
#define ONE (DTYPE) (1.0)
extern "C" __global__
void stable_kernel(const DTYPE* x1, const DTYPE* x2, DTYPE* out, const double variance,
const int N, int M, int D) {
const int I = (blockIdx.x * blockDim.x) + threadIdx.x;
const int J = (blockIdx.y * blockDim.y) + threadIdx.y;
if (I >= N || J >= M) {
return;
}
DTYPE norm_x = (DTYPE) 0.0; //normf(D, &x1[I*D]);
DTYPE norm_y = (DTYPE) 0.0; //normf(D, &x2[J*D]);
#pragma unroll
for (int k = 0; k < D; k += 1) {
norm_x = fma(x1[I * D + k], x1[I * D + k], norm_x);
norm_y = fma(x2[J * D + k], x2[J * D + k], norm_y);
}
norm_x = sqrt(norm_x);
norm_y = sqrt(norm_y);
DTYPE arg1 = (DTYPE) 0.0;
DTYPE arg2 = (DTYPE) 0.0;
#pragma unroll
for (int k = 0; k < D; k += 1) {
DTYPE x1_ik = x1[I * D + k];
DTYPE x2_jk = x2[J * D + k];
DTYPE a1 = norm_y * x1_ik - norm_x * x2_jk;
DTYPE a2 = norm_y * x1_ik + norm_x * x2_jk;
arg1 = fma(a1, a1, arg1);
arg2 = fma(a2, a2, arg2);
}
arg1 = sqrt(arg1);
arg2 = sqrt(arg2);
DTYPE angle = 2.0 * atan2(arg1, arg2);
DTYPE norm_xy = norm_x * norm_y;
DTYPE cos_angle = cos(angle);
DTYPE sin_angle = sin(angle);
DTYPE opv = ONE + (DTYPE)(variance);
DTYPE K = norm_xy * (sin_angle + opv * (PI - angle) * cos_angle) / PI;
out[I * M + J] = K;
}
'''
assert X1.dtype == X2.dtype == out.dtype, "X1, X2, and out don't have the same dtype"
assert X1.device == X2.device == out.device, "X1, X2, and out are not on the same device"
assert out.device.index is not None, "None device index"
if X1.dtype == torch.float32:
str_dtype = "float"
cupy_dtype = cp.float32
elif X1.dtype == torch.float64:
str_dtype = "double"
cupy_dtype = cp.float64
else:
raise ValueError("Invalid dtype must be float32 or float64")
kernel_code = kernel_code.replace("DTYPE", str_dtype)
kernel = cp.RawKernel(kernel_code, 'stable_kernel')
# The .contiguous should be a no-op in both these cases, but add them in for good measure
X1 = X1.contiguous()
X2 = X2.T.contiguous()
# Convert X1 and X2 to CuPy arrays.
x1cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X1))
x2cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X2))
with cp.cuda.Device(out.device.index):
outcp = cp.zeros((out.shape[0], out.shape[1]), dtype=cupy_dtype)
# Run the CUDA kernel to build the matrix K
pt_dim = int(X1.shape[1])
dims = int(X1.shape[0]), int(X2.shape[0])
threads_per_block = (16, 16) # TODO: Maybe hardcoding this is bad
blocks_per_grid = tuple((dims[i] + threads_per_block[i] - 1) // threads_per_block[i] for i in range(2))
kernel(blocks_per_grid, threads_per_block, (x1cp, x2cp, outcp, self.variance, dims[0], dims[1], pt_dim))
cp.cuda.stream.get_current_stream().synchronize() # Need to synchronize so we can copy to PyTorch
# print("COPYING CUPY OUT TO PYTORCH")
# print("OUT CUPY\n", outcp)
# Copy the kernel back into the output PyTorch tensor
outcp_dlpack = outcp.toDlpack()
out_dlpack = torch.utils.dlpack.from_dlpack(outcp_dlpack)
out.copy_(out_dlpack)
# print("OUT PYTORCH\n", out)
# rand_idx_i, rand_idx_j = np.random.randint(X1.shape[0]), np.random.randint(X2.shape[0])
# xi, xj = X1[rand_idx_i].detach().cpu().numpy(), X2[rand_idx_j].detach().cpu().numpy()
# nxi, nxj = np.linalg.norm(xi), np.linalg.norm(xj)
# angle1, angle2 = np.linalg.norm(nxj * xi - nxi * xj), np.linalg.norm(nxj * xi + nxi * xj)
# angle = 2.0 * np.arctan2(angle1, angle2)
# kij = nxi * nxj * (np.sin(angle) + (1.0 + self.variance) * (np.pi - angle) * np.cos(angle)) / np.pi
# print(np.abs(kij - out[rand_idx_i, rand_idx_j].item()))
def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor):
raise NotImplementedError("NeuralSpline does not implement sparse apply")
def _finalize(self, A: torch.Tensor, d):
if self.debug:
print(f"NeuralSpline._finalize(A={A.shape}, d)")
return A
def __str__(self):
return f"NeuralSplineKernel()"
def __repr__(self):
return self.__str__()
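# For reference, both code paths above (the KeOps formula and the raw CUDA kernel)
# evaluate, for inputs x and y with n_x = ||x|| and n_y = ||y||,
#   theta  = 2 * atan2(||n_y*x - n_x*y||, ||n_y*x + n_x*y||)
#   K(x,y) = n_x * n_y * (sin(theta) + (1 + variance) * (pi - theta) * cos(theta)) / pi
# where theta is a numerically stable expression for the angle between x and y.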
class LaplaceKernelSphere(Kernel, KeopsKernelMixin, ABC):
kernel_type = "angle"
def __init__(self, alpha, gamma, opt: Optional[FalkonOptions] = None):
super().__init__("LaplaceKernelSphere", self.kernel_type, opt)
self.debug = opt.debug if opt is not None else False
self.alpha = _extract_float(alpha)
self.gamma = _extract_float(gamma)
def extra_mem(self):
return {
# We transpose X2 in _apply
'nd': 0,
'md': 1,
# Norm results in prepare
'm': 0,
'n': 0,
# We do a copy in _apply
'nm': 1,
}
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt):
if self.debug:
print("LaplaceKernelSphere._keops_mmv_impl(X1, X2, v, kernel, out, opt)")
theta = 'two * Atan2(Norm2(Norm2(Y) * X - Norm2(X) * Y), Norm2(Norm2(Y) * X + Norm2(X) * Y))'
norm_xy = '(Norm2(X) * Norm2(Y))'
j01 = f'({norm_xy} * (Exp(alpha * Powf(one - Cos({theta}), gamma))))'
formula = f'({j01}) * v'
aliases = [
'X = Vi(%d)' % (X1.shape[1]),
'Y = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'alpha = Pm(1)',
'gamma = Pm(1)',
'one = Pm(1)',
'two = Pm(1)',
]
other_vars = [torch.tensor([self.alpha]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([self.gamma]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([1.0]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([2.0]).to(dtype=X1.dtype, device=X1.device)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def _decide_mmv_impl(self, X1, X2, v, opt):
if self.keops_can_handle_mmv(X1, X2, v, opt):
return self._keops_mmv_impl
else:
return super()._decide_mmv_impl(X1, X2, v, opt)
def _decide_dmmv_impl(self, X1, X2, v, w, opt):
if self.keops_can_handle_dmmv(X1, X2, v, w, opt):
return functools.partial(self.keops_dmmv_helper, mmv_fn=self._keops_mmv_impl)
else:
return super()._decide_dmmv_impl(X1, X2, v, w, opt)
def _prepare(self, X1, X2, **kwargs):
if self.debug:
print("LaplaceKernelSphere._prepare(X1, X2, *kwargs)")
return []
def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor):
raise NotImplementedError("LaplaceKernelSphere does not implement sparse prepare")
def _apply(self, X1: torch.Tensor, X2: torch.Tensor, out: torch.Tensor):
if self.debug:
print("LaplaceKernelSphere._apply(X1, X2, out)")
kernel_code = r'''
#define PI (DTYPE) (3.1415926535897932384626433832795028841971693993751058209749445923078164062)
#define ONE (DTYPE) (1.0)
extern "C" __global__
void stable_kernel(const DTYPE* x1, const DTYPE* x2, DTYPE* out, const double alpha, double gamma,
const int N, int M, int D) {
const int I = (blockIdx.x * blockDim.x) + threadIdx.x;
const int J = (blockIdx.y * blockDim.y) + threadIdx.y;
if (I >= N || J >= M) {
return;
}
DTYPE norm_x = (DTYPE) 0.0; //normf(D, &x1[I*D]);
DTYPE norm_y = (DTYPE) 0.0; //normf(D, &x2[J*D]);
#pragma unroll
for (int k = 0; k < D; k += 1) {
norm_x = fma(x1[I * D + k], x1[I * D + k], norm_x);
norm_y = fma(x2[J * D + k], x2[J * D + k], norm_y);
}
norm_x = sqrt(norm_x);
norm_y = sqrt(norm_y);
DTYPE arg1 = (DTYPE) 0.0;
DTYPE arg2 = (DTYPE) 0.0;
#pragma unroll
for (int k = 0; k < D; k += 1) {
DTYPE x1_ik = x1[I * D + k];
DTYPE x2_jk = x2[J * D + k];
DTYPE a1 = norm_y * x1_ik - norm_x * x2_jk;
DTYPE a2 = norm_y * x1_ik + norm_x * x2_jk;
arg1 = fma(a1, a1, arg1);
arg2 = fma(a2, a2, arg2);
}
arg1 = sqrt(arg1);
arg2 = sqrt(arg2);
DTYPE angle = 2.0 * atan2(arg1, arg2);
DTYPE norm_xy = norm_x * norm_y;
DTYPE cos_angle = cos(angle);
DTYPE K = norm_xy * exp((DTYPE) alpha * pow(ONE - cos_angle, (DTYPE) gamma));
out[I * M + J] = K;
}
'''
assert X1.dtype == X2.dtype == out.dtype, "X1, X2, and out don't have the same dtype"
assert X1.device == X2.device == out.device, "X1, X2, and out are not on the same device"
assert out.device.index is not None, "None device index"
if X1.dtype == torch.float32:
str_dtype = "float"
cupy_dtype = cp.float32
elif X1.dtype == torch.float64:
str_dtype = "double"
cupy_dtype = cp.float64
else:
raise ValueError("Invalid dtype must be float32 or float64")
kernel_code = kernel_code.replace("DTYPE", str_dtype)
kernel = cp.RawKernel(kernel_code, 'stable_kernel')
# The .contiguous should be a no-op in both these cases, but add them in for good measure
X1 = X1.contiguous()
X2 = X2.T.contiguous()
# Convert X1 and X2 to CuPy arrays.
x1cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X1))
x2cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X2))
with cp.cuda.Device(out.device.index):
outcp = cp.zeros((out.shape[0], out.shape[1]), dtype=cupy_dtype)
# Run the CUDA kernel to build the matrix K
pt_dim = int(X1.shape[1])
dims = int(X1.shape[0]), int(X2.shape[0])
threads_per_block = (16, 16) # TODO: Maybe hardcoding this is bad
blocks_per_grid = tuple((dims[i] + threads_per_block[i] - 1) // threads_per_block[i] for i in range(2))
kernel(blocks_per_grid, threads_per_block,
(x1cp, x2cp, outcp, self.alpha, self.gamma, dims[0], dims[1], pt_dim))
cp.cuda.stream.get_current_stream().synchronize() # Need to synchronize so we can copy to PyTorch
# Copy the kernel back into the output PyTorch tensor
outcp_dlpack = outcp.toDlpack()
out_dlpack = torch.utils.dlpack.from_dlpack(outcp_dlpack)
out.copy_(out_dlpack)
def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor):
raise NotImplementedError("LaplaceKernelSphere does not implement sparse apply")
def _finalize(self, A, d):
if self.debug:
print("LaplaceKernelSphere._finalize(A, d)")
return A
def __str__(self):
return f"LaplaceKernelSphere(alpha={self.alpha})"
def __repr__(self):
return self.__str__()
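# With theta computed exactly as in NeuralSplineKernel above, this class evaluates
#   K(x,y) = ||x|| * ||y|| * exp(alpha * (1 - cos(theta))^gamma)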
class LinearAngleKernel(Kernel, KeopsKernelMixin, ABC):
kernel_type = "angle"
def __init__(self, multiply_norm=False, opt: Optional[FalkonOptions] = None):
super().__init__("LinearAngleKernel", self.kernel_type, opt)
self.debug = opt.debug if opt is not None else False
self.multiply_norm = multiply_norm
def extra_mem(self):
return {
# We transpose X2 in _apply
'nd': 0,
'md': 1,
# Norm results in prepare
'm': 0,
'n': 0,
# We do a copy in _apply
'nm': 1,
}
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt):
if self.debug:
print(f"LinearAngleKernel._keops_mmv_impl(X1={X1.shape}, X2={X2.shape}, v, kernel, out, opt)")
theta = 'two * Atan2(Norm2(Norm2(Y) * X - Norm2(X) * Y), Norm2(Norm2(Y) * X + Norm2(X) * Y))'
if self.multiply_norm:
norm_xy = '(Norm2(X) * Norm2(Y))'
j01 = f'({norm_xy} * (pi - {theta}))'
else:
j01 = f'(pi - {theta})'
formula = f'({j01} / pi) * v'
aliases = [
'X = Vi(%d)' % (X1.shape[1]),
'Y = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'pi = Pm(1)',
'two = Pm(1)'
]
other_vars = [torch.tensor([np.pi]).to(dtype=X1.dtype, device=X1.device),
torch.tensor([2.0]).to(dtype=X1.dtype, device=X1.device)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def _decide_mmv_impl(self, X1, X2, v, opt):
if self.keops_can_handle_mmv(X1, X2, v, opt):
return self._keops_mmv_impl
else:
return super()._decide_mmv_impl(X1, X2, v, opt)
def _decide_dmmv_impl(self, X1, X2, v, w, opt):
if self.keops_can_handle_dmmv(X1, X2, v, w, opt):
return functools.partial(self.keops_dmmv_helper, mmv_fn=self._keops_mmv_impl)
else:
return super()._decide_dmmv_impl(X1, X2, v, w, opt)
def _prepare(self, X1, X2, **kwargs):
if self.debug:
print(f"LinearAngleKernel._prepare(X1={X1.shape}, X2={X2.shape}, *kwargs)")
return []
def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor):
raise NotImplementedError("LinearAngleKernel does not implement sparse prepare")
def _apply(self, X1: torch.Tensor, X2: torch.Tensor, out: torch.Tensor):
if self.debug:
print(f"LinearAngleKernel._apply(X1={X1.shape}, X2={X2.shape}, out={out.shape})")
kernel_code = r'''
#define PI (DTYPE) (3.1415926535897932384626433832795028841971693993751058209749445923078164062)
#define ONE (DTYPE) (1.0)
__MUL_NORM_DEFINE__
extern "C" __global__
void stable_kernel(const DTYPE* x1, const DTYPE* x2, DTYPE* out,
const int N, int M, int D) {
const int I = (blockIdx.x * blockDim.x) + threadIdx.x;
const int J = (blockIdx.y * blockDim.y) + threadIdx.y;
if (I >= N || J >= M) {
return;
}
DTYPE norm_x = (DTYPE) 0.0; //normf(D, &x1[I*D]);
DTYPE norm_y = (DTYPE) 0.0; //normf(D, &x2[J*D]);
#pragma unroll
for (int k = 0; k < D; k += 1) {
norm_x = fma(x1[I * D + k], x1[I * D + k], norm_x);
norm_y = fma(x2[J * D + k], x2[J * D + k], norm_y);
}
norm_x = sqrt(norm_x);
norm_y = sqrt(norm_y);
DTYPE arg1 = (DTYPE) 0.0;
DTYPE arg2 = (DTYPE) 0.0;
#pragma unroll
for (int k = 0; k < D; k += 1) {
DTYPE x1_ik = x1[I * D + k];
DTYPE x2_jk = x2[J * D + k];
DTYPE a1 = norm_y * x1_ik - norm_x * x2_jk;
DTYPE a2 = norm_y * x1_ik + norm_x * x2_jk;
arg1 = fma(a1, a1, arg1);
arg2 = fma(a2, a2, arg2);
}
arg1 = sqrt(arg1);
arg2 = sqrt(arg2);
DTYPE angle = 2.0 * atan2(arg1, arg2);
#ifdef MULTIPLY_NORM
DTYPE norm_xy = norm_x * norm_y;
DTYPE K = norm_xy * (PI - angle) / PI;
#else
DTYPE K = (PI - angle) / PI;
#endif
out[I * M + J] = K;
}
'''
assert X1.dtype == X2.dtype == out.dtype, "X1, X2, and out don't have the same dtype"
assert X1.device == X2.device == out.device, "X1, X2, and out are not on the same device"
assert out.device.index is not None, "None device index"
if X1.dtype == torch.float32:
str_dtype = "float"
cupy_dtype = cp.float32
elif X1.dtype == torch.float64:
str_dtype = "double"
cupy_dtype = cp.float64
else:
raise ValueError("Invalid dtype must be float32 or float64")
kernel_code = kernel_code.replace("DTYPE", str_dtype)
if self.multiply_norm:
kernel_code = kernel_code.replace("__MUL_NORM_DEFINE__", "#define MULTIPLY_NORM\n")
else:
kernel_code = kernel_code.replace("__MUL_NORM_DEFINE__", "\n")
kernel = cp.RawKernel(kernel_code, 'stable_kernel')
# The .contiguous should be a no-op in both these cases, but add them in for good measure
X1 = X1.contiguous()
X2 = X2.T.contiguous()
# Convert X1 and X2 to CuPy arrays.
x1cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X1))
x2cp = cp.fromDlpack(torch.utils.dlpack.to_dlpack(X2))
with cp.cuda.Device(out.device.index):
outcp = cp.zeros((out.shape[0], out.shape[1]), dtype=cupy_dtype)
# Run the CUDA kernel to build the matrix K
pt_dim = int(X1.shape[1])
dims = int(X1.shape[0]), int(X2.shape[0])
threads_per_block = (16, 16) # TODO: Maybe hardcoding this is bad
blocks_per_grid = tuple((dims[i] + threads_per_block[i] - 1) // threads_per_block[i] for i in range(2))
kernel(blocks_per_grid, threads_per_block, (x1cp, x2cp, outcp, dims[0], dims[1], pt_dim))
cp.cuda.stream.get_current_stream().synchronize() # Need to synchronize so we can copy to PyTorch
# print("COPYING CUPY OUT TO PYTORCH")
# print("OUT CUPY\n", outcp)
# Copy the kernel back into the output PyTorch tensor
outcp_dlpack = outcp.toDlpack()
out_dlpack = torch.utils.dlpack.from_dlpack(outcp_dlpack)
out.copy_(out_dlpack)
# print("OUT PYTORCH\n", out)
# rand_idx_i, rand_idx_j = np.random.randint(X1.shape[0]), np.random.randint(X2.shape[0])
# xi, xj = X1[rand_idx_i].detach().cpu().numpy(), X2[rand_idx_j].detach().cpu().numpy()
# nxi, nxj = np.linalg.norm(xi), np.linalg.norm(xj)
# angle1, angle2 = np.linalg.norm(nxj * xi - nxi * xj), np.linalg.norm(nxj * xi + nxi * xj)
# angle = 2.0 * np.arctan2(angle1, angle2)
# kij = nxi * nxj * (np.sin(angle) + (1.0 + self.variance) * (np.pi - angle) * np.cos(angle)) / np.pi
# print(np.abs(kij - out[rand_idx_i, rand_idx_j].item()))
def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor):
raise NotImplementedError("LinearAngleKernel does not implement sparse apply")
def _finalize(self, A: torch.Tensor, d):
if self.debug:
print(f"LinearAngleKernel._finalize(A={A.shape}, d)")
return A
def __str__(self):
return f"NeuralSplineKernel()"
def __repr__(self):
return self.__str__()
# neural-splines | neural-splines-main/neural_splines/kmeans.py
import pykeops.torch as keops
import torch
def kmeans(x, k, num_iters=10):
"""
Implements Lloyd's algorithm for the Euclidean metric.
:param x: A tensor representing a set of N points of dimension D (shape [N, D])
:param k: The number of centroids to compute
:param num_iters: The number of K means iterations to do
:return: cl, c where cl are cluster labels for each input point (shape [N]) and c are the
cluster centroids (shape [K, D])
"""
N, D = x.shape # Number of samples, dimension of the ambient space
# Simplistic initialization for the centroids
perm = torch.randperm(N)[:k]
c = x[perm, :].clone()
cl = None
x_i = keops.LazyTensor(x.view(N, 1, D)) # (N, 1, D) samples
c_j = keops.LazyTensor(c.view(1, k, D)) # (1, K, D) centroids
# K-means loop:
# - x is the (N, D) point cloud,
# - cl is the (N,) vector of class labels
# - c is the (K, D) cloud of cluster centroids
for i in range(num_iters):
# E step: assign points to the closest cluster -------------------------
D_ij = ((x_i - c_j) ** 2).sum(-1) # (N, K) symbolic squared distances
cl = D_ij.argmin(dim=1).long().view(-1) # Points -> Nearest cluster
# M step: update the centroids to the normalized cluster average: ------
# Compute the sum of points per cluster:
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
# Divide by the number of points per cluster:
Ncl = torch.bincount(cl, minlength=k).type_as(c).view(k, 1)
c /= Ncl # in-place division to compute the average
return cl, c
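# Minimal usage sketch (shapes are illustrative; KeOps compiles its kernels on
# first use, so the first call is slow):
#   x = torch.randn(10_000, 3)
#   labels, centroids = kmeans(x, k=128, num_iters=10)
#   # labels: (10_000,) cluster index per point; centroids: (128, 3)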
# neural-splines | neural-splines-main/neural_splines/geometry.py
import torch
import numpy as np
from scipy.interpolate import RegularGridInterpolator
def normalize_pointcloud_transform(x):
"""
    Compute an affine transformation that normalizes the point cloud x to lie in [-0.5, 0.5]^3
:param x: A point cloud represented as a tensor of shape [N, 3]
:return: An affine transformation represented as a tuple (t, s) where t is a translation and s is scale
"""
min_x, max_x = x.min(0)[0], x.max(0)[0]
bbox_size = max_x - min_x
translate = -(min_x + 0.5 * bbox_size)
scale = 1.0 / torch.max(bbox_size)
return translate, scale
def affine_transform_pointcloud(x, tx):
"""
Apply the affine transform tx to the point cloud x
:param x: A pytorch tensor of shape [N, 3]
:param tx: An affine transformation represented as a tuple (t, s) where t is a translation and s is scale
:return: The transformed point cloud
"""
translate, scale = tx
return scale * (x + translate)
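# Example: map a cloud into the normalized frame used by the model; the same
# (translate, scale) pair is later applied to query points before evaluation.
#   tx = normalize_pointcloud_transform(x)
#   x_model = affine_transform_pointcloud(x, tx)  # lies roughly in [-0.5, 0.5]^3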
def affine_transform_bounding_box(bbox, tx):
"""
Apply the affine transform tx to the bounding box bbox
    :param bbox: A bounding box represented as 2 3D vectors (origin, size)
:param tx: An affine transformation represented as a tuple (t, s) where t is a translation and s is scale
:return: The transformed point bounding box
"""
translate, scale = tx
return scale * (bbox[0] + translate), scale * bbox[1]
def points_in_bbox(x, bbox):
"""
    Compute a mask indicating which points in x lie in the bounding box bbox
    :param x: A point cloud represented as a tensor of shape [N, 3]
    :param bbox: A bounding box represented as 2 3D vectors (origin, size)
:return: A mask of shape [N] where True values correspond to points in x which lie inside bbox
"""
mask = torch.logical_and(x > bbox[0], x <= bbox[0] + bbox[1])
    mask = mask.all(dim=-1)
return mask
def point_cloud_bounding_box(x, scale=1.0):
"""
Get the axis-aligned bounding box for a point cloud (possibly scaled by some factor)
:param x: A point cloud represented as an [N, 3]-shaped tensor
:param scale: A scale factor by which to scale the bounding box diagonal
:return: The (possibly scaled) axis-aligned bounding box for a point cloud represented as a pair (origin, size)
"""
bb_min = x.min(0)[0]
bb_size = x.max(0)[0] - bb_min
return scale_bounding_box_diameter((bb_min, bb_size), scale)
def scale_bounding_box_diameter(bbox, scale):
"""
Scale the diagonal of the bounding box bbox while maintaining its center position
:param bbox: A bounding box represented as a pair (origin, size)
:param scale: A scale factor by which to scale the input bounding box's diagonal
:return: The (possibly scaled) axis-aligned bounding box for a point cloud represented as a pair (origin, size)
"""
bb_min, bb_size = bbox
bb_diameter = torch.norm(bb_size)
bb_unit_dir = bb_size / bb_diameter
scaled_bb_size = bb_size * scale
scaled_bb_diameter = torch.norm(scaled_bb_size)
scaled_bb_min = bb_min - 0.5 * (scaled_bb_diameter - bb_diameter) * bb_unit_dir
return scaled_bb_min, scaled_bb_size
def triple_points_along_normals(x, n, eps, homogeneous=False):
"""
    Convert a point cloud equipped with normals into a point cloud with points perturbed along those normals.
Each point X with normal N, in the input gets converted to 3 points:
(X, X+eps*N, X-eps*N) which have occupancy values (0, eps, -eps)
:param x: The input points of shape [N, 3]
:param n: The input normals of shape [N, 3]
:param eps: The amount to perturb points about each normal
:param homogeneous: If true, return the points in homogeneous coordinates
:return: A pair, (X, O) consisting of the new point cloud X and point occupancies O
"""
x_in = x - n * eps
x_out = x + n * eps
x_triples = torch.cat([x, x_in, x_out], dim=0)
occ_triples = torch.cat([torch.zeros(x.shape[0]),
-torch.ones(x.shape[0]),
torch.ones(x.shape[0])]).to(x) * eps
if homogeneous:
x_triples = torch.cat([x_triples, torch.ones(x_triples.shape[0], 1, dtype=x_triples.dtype)], dim=-1)
return x_triples, occ_triples
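# Worked example: a point p with unit normal n and eps = 0.01 becomes the three
# samples (p, p - 0.01*n, p + 0.01*n) with target occupancies (0, -0.01, +0.01);
# these are the finite-difference constraints that the kernel regression fits.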
def voxel_chunks(grid_size, cells_per_axis):
"""
Iterator over ranges which partition a voxel grid into non-overlapping chunks.
:param grid_size: Size of the voxel grid to split into chunks
:param cells_per_axis: Number of cells along each axis
    :return: Each iteration yields a triple (cell_idx, vmin, vmax), where cell_idx is the (i, j, k) index of the
             chunk and vmin/vmax are its minimum and maximum voxel indices. i.e. if vox is a voxel grid with
             shape grid_size, then vox[vmin:vmax] are the voxels in the current chunk
"""
if np.isscalar(cells_per_axis):
cells_per_axis = torch.tensor([cells_per_axis] * len(grid_size)).to(torch.int32)
current_vox_min = torch.tensor([0.0, 0.0, 0.0]).to(torch.float64)
current_vox_max = torch.tensor([0.0, 0.0, 0.0]).to(torch.float64)
cell_size_float = grid_size.to(torch.float64) / cells_per_axis
for c_i in range(cells_per_axis[0]):
current_vox_min[0] = current_vox_max[0]
current_vox_max[0] = cell_size_float[0] + current_vox_max[0]
current_vox_min[1:] = 0
current_vox_max[1:] = 0
for c_j in range(cells_per_axis[1]):
current_vox_min[1] = current_vox_max[1]
current_vox_max[1] = cell_size_float[1] + current_vox_max[1]
current_vox_min[2:] = 0
current_vox_max[2:] = 0
for c_k in range(cells_per_axis[2]):
current_vox_min[2] = current_vox_max[2]
current_vox_max[2] = cell_size_float[2] + current_vox_max[2]
vox_min = torch.round(current_vox_min).to(torch.int32)
vox_max = torch.round(current_vox_max).to(torch.int32)
yield (c_i, c_j, c_k), vox_min, vox_max
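# Usage sketch: split a 128^3 grid into 2x2x2 chunks (`vox` is a hypothetical
# voxel grid of that shape):
#   grid_size = torch.tensor([128, 128, 128])
#   for (ci, cj, ck), vmin, vmax in voxel_chunks(grid_size, 2):
#       chunk = vox[vmin[0]:vmax[0], vmin[1]:vmax[1], vmin[2]:vmax[2]]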
def cell_weights_trilinear(vmin, vmax, pvmin, pvmax):
"""
Returns a voxel grid of weights used to blend two adjacent cells together which overlap by some amount of voxels.
:param vmin: The minimum voxel indices for the cell
:param vmax: The maximum voxel indices for the cell
:param pvmin: The minimum voxel index for the padded cell
:param pvmax: The maximum voxel index for the padded cell
    :return: A voxel grid of size (pvmax - pvmin) of trilinear weights used to interpolate neighboring cells
"""
dmin = vmin - pvmin
dmax = pvmax - vmax
x, y, z = [np.unique(np.array([pvmin[i], pvmin[i] + 2.0 * dmin[i], pvmax[i] - 2.0 * dmax[i], pvmax[i]]))
for i in range(3)]
vals = np.zeros([x.shape[0], y.shape[0], z.shape[0]])
xyz = (x, y, z)
one_idxs = []
for dim in range(3):
if xyz[dim].shape[0] == 2:
one_idxs.append([0, 1])
elif xyz[dim].shape[0] == 3:
if vmin[dim] == pvmin[dim]:
one_idxs.append([0, 1])
else:
one_idxs.append([1, 2])
else:
one_idxs.append([1, 2])
for i in one_idxs[0]:
for j in one_idxs[1]:
for k in one_idxs[2]:
vals[i, j, k] = 1.0
f_w = RegularGridInterpolator((x, y, z), vals)
psize = (pvmax - pvmin).numpy()
pmin = (pvmin + 0.5).numpy()
pmax = (pvmax - 0.5).numpy()
pts = np.stack([np.ravel(a) for a in
np.mgrid[pmin[0]:pmax[0]:psize[0] * 1j,
pmin[1]:pmax[1]:psize[1] * 1j,
pmin[2]:pmax[2]:psize[2] * 1j]], axis=-1)
return torch.from_numpy(f_w(pts).reshape(psize)), pvmin, pvmax
# neural-splines | neural-splines-main/neural_splines/__init__.py
import time
import warnings
import point_cloud_utils as pcu
import falkon
from falkon.utils.tensor_helpers import create_same_stride
from .falkon_kernels import NeuralSplineKernel, LaplaceKernelSphere, LinearAngleKernel
from .geometry import *
from .kmeans import kmeans
_VERBOSITY_LEVEL_DEBUG = 0
_VERBOSITY_LEVEL_INFO = 1
_VERBOSITY_LEVEL_SILENT = 5
class FixedIndexSelector(falkon.center_selection.CenterSelector):
def __init__(self, idx, random_gen=None):
super().__init__(random_gen)
self.idx = idx
def select(self, X, Y, M):
Xc = create_same_stride((M, X.shape[1]), other=X, dtype=X.dtype, device=X.device,
pin_memory=False)
        th_idx = torch.from_numpy(self.idx.astype(np.int64)).to(X.device)
torch.index_select(X, dim=0, index=th_idx, out=Xc)
if Y is not None:
Yc = create_same_stride((M, Y.shape[1]), other=Y, dtype=Y.dtype, device=Y.device,
pin_memory=False)
            th_idx = torch.from_numpy(self.idx.astype(np.int64)).to(Y.device)
torch.index_select(Y, dim=0, index=th_idx, out=Yc)
return Xc, Yc
return Xc
def _generate_nystrom_samples(x, num_samples, sampling_method, verbosity_level=1):
if x.shape[1] != 3:
raise ValueError(f"Invalid shape for x, must be [N, 3] but got {x.shape}")
if x.shape[0] < num_samples:
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print(f"Requested more Nyström samples ({num_samples}) than points ({x.shape[0]}) using all points.")
center_selector = 'uniform'
x_ny = None
ny_count = min(num_samples, x.shape[0])
elif sampling_method == 'random':
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Using Nyström samples chosen uniformly at random from the input.")
center_selector = 'uniform'
x_ny = None
ny_count = min(num_samples, x.shape[0])
elif sampling_method == 'blue-noise':
blue_noise_seed = np.random.randint(2 ** 31 - 1)
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print(f"Generating {num_samples} blue noise Nyström samples for {x.shape[0]} points.")
        # Allow generating +/- 5% of the requested samples so the algorithm converges quickly
sample_num_tolerance = 0.05
ny_idx = pcu.downsample_point_cloud_poisson_disk(x.numpy(), num_samples, random_seed=blue_noise_seed,
sample_num_tolerance=sample_num_tolerance)
x_ny = x[ny_idx]
x_ny = torch.cat([x_ny, torch.ones(x_ny.shape[0], 1).to(x_ny)], dim=-1)
ny_count = x_ny.shape[0]
center_selector = FixedIndexSelector(idx=ny_idx)
elif sampling_method == 'k-means':
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Generating k-means Nyström samples.")
_, x_ny = kmeans(x.contiguous(), num_samples)
x_ny = torch.cat([x_ny, torch.ones(x_ny.shape[0], 1).to(x_ny)], dim=-1)
ny_count = x_ny.shape[0]
center_selector = falkon.center_selection.FixedSelector(centers=x_ny, y_centers=None)
else:
raise ValueError(f"Invalid value {sampling_method} for --nystrom-mode. "
f"Must be one of 'random', 'blue-noise' or 'k-means'")
return x_ny, center_selector, ny_count
def _run_falkon_fit(x, y, penalty, num_ny, center_selector, kernel_type="neural-spline",
maxiters=20, stop_thresh=1e-7, variance=1.0, falkon_opts=None, verbosity_level=1):
if falkon_opts is None:
falkon_opts = falkon.FalkonOptions()
# Always use cuda for everything
falkon_opts.min_cuda_pc_size_64 = 1
falkon_opts.min_cuda_pc_size_32 = 1
falkon_opts.min_cuda_iter_size_64 = 1
falkon_opts.min_cuda_iter_size_32 = 1
falkon_opts.use_cpu = False
falkon_opts.cg_tolerance = stop_thresh
falkon_opts.debug = verbosity_level <= _VERBOSITY_LEVEL_DEBUG
falkon_opts.cg_print_when_done = verbosity_level <= _VERBOSITY_LEVEL_INFO
elif verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Overiding default FALKON settings with custom options")
if kernel_type == "neural-spline":
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Using Neural Spline Kernel")
kernel = NeuralSplineKernel(variance=variance, opt=falkon_opts)
elif kernel_type == "spherical-laplace":
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Using Spherical Laplace Kernel")
kernel = LaplaceKernelSphere(alpha=-0.5, gamma=0.5, opt=falkon_opts)
elif kernel_type == "linear-angle":
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print("Using Linear Angle Kernel")
kernel = LinearAngleKernel(opt=falkon_opts)
else:
raise ValueError(f"Invalid kernel_type {kernel_type}, expected one of 'neural-spline' or 'spherical-laplace'")
fit_start_time = time.time()
model = falkon.Falkon(kernel=kernel, penalty=penalty, M=num_ny, options=falkon_opts, maxiter=maxiters,
center_selection=center_selector)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
model.fit(x, y)
if verbosity_level <= _VERBOSITY_LEVEL_INFO:
print(f"Fit model in {time.time() - fit_start_time} seconds")
return model
def get_weights(vmin, vmax, pvmin, pvmax, weight_type):
"""
Get the per-voxel partition of unity weights for a cell when reconstructing on a grid of cells
:param vmin: Minimum voxel index for this cell
:param vmax: Maximum voxel index for this cell
:param pvmin: Minimum voxel index for the padded cell
:param pvmax: Maximum voxel index for the padded cell
:param weight_type: What kind of partition-of-unity to use
:return: A triple (weights, idxmin, idxmax) where weights is a (idxmax-idxmin)-shaped voxel grid and
idxmin and idxmax are 3-tensors indicating the index range in the output voxel grid which the
weights should correspond to
"""
if weight_type == 'trilinear':
return cell_weights_trilinear(vmin, vmax, pvmin, pvmax)
elif weight_type == 'none':
return 1.0, vmin, vmax
else:
raise ValueError("Invalid weight_type, must be one of 'trilinear' or 'none'")
def load_point_cloud(filename, min_norm_normal=1e-5, dtype=torch.float64):
"""
Load a point cloud with normals, filtering out points whose normal has a magnitude below the given threshold.
:param filename: Path to a PLY file
:param min_norm_normal: The minimum norm of a normal below which we discard a point
:param dtype: The output dtype of the tensors returned
:return: A pair v, n, where v is a an [N, 3]-shaped tensor of points, n is a [N, 3]-shaped tensor of unit normals
"""
v, _, n = pcu.load_mesh_vfn(filename, dtype=np.float64)
v, idx, _ = pcu.deduplicate_point_cloud(v, 1e-15, return_index=True) # Deduplicate point cloud when loading it
n = n[idx]
# Some meshes have non unit normals, so build a binary mask of points whose normal has a reasonable magnitude
# We use this mask to remove bad vertices
mask = np.linalg.norm(n, axis=-1) > min_norm_normal
# Keep the good points and normals
x = v[mask].astype(np.float64)
n = n[mask].astype(np.float64)
n /= np.linalg.norm(n, axis=-1, keepdims=True)
return torch.from_numpy(x).to(dtype), torch.from_numpy(n).to(dtype)
def fit_model_to_pointcloud(x, n, num_ny, eps, kernel='neural-spline',
reg=1e-7, ny_mode='blue-noise',
cg_stop_thresh=1e-5, cg_max_iters=20,
outer_layer_variance=1.0,
verbosity_level=1, custom_falkon_opts=None,
normalize=True):
"""
Fit a kernel to the point cloud with points x and normals n.
:param x: A tensor of 3D points with shape [N, 3]
:param n: A tensor of unit normals with shape [N, 3]
:param num_ny: The number of Nystrom samples to use. If negative, don't use Nyström sampling.
    :param ny_mode: How to generate Nyström samples. Must be one of (1) 'random', (2) 'blue-noise', or (3) 'k-means'.
:param eps: Finite differencing coefficient used to approximate the gradient by perturbing points by this
amount about their normals
:param kernel: Which kernel to use. Must be one of 'neural-spline', 'spherical-laplace', or 'linear-angle'.
:param reg: Amount of regularization to apply when solving the kernel ridge regression
:param cg_stop_thresh: Stop threshold for the conjugate gradient solver
:param cg_max_iters: Maximum number of conjugate gradient iterations
    :param outer_layer_variance: Variance of the outer layer of the neural network from which the neural spline
                                 kernel arises.
:param verbosity_level: How much should this function spam your terminal. 0 = debug, 1 = info, >5 = silent
:param custom_falkon_opts: Object of type falkon.FalkonOptions object used to override the default solver settings
:param normalize: If set, then normalize the point cloud to have zero mean
:return: A pair (model, tx) where model is a fitted neural spline model class (with the same API as scikit-learn)
and tx is an affine transformation which converts world space samples to model coordinates.
You *must* apply this transformation to points before evaluating the model.
This transformation is represented as a tuple (t, s) where t is a translation and s is scale.
"""
x, y = triple_points_along_normals(x, n, eps, homogeneous=False)
if normalize:
tx = normalize_pointcloud_transform(x)
else:
tx = 0.0, 1.0
x = affine_transform_pointcloud(x, tx)
x_ny, center_selector, ny_count = _generate_nystrom_samples(x, num_ny, ny_mode, verbosity_level=verbosity_level)
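    # Append a constant 1 to each point (homogeneous coordinates) before handing the
    # data to the angle-based kernels; this effectively gives the underlying network
    # a bias input (an interpretation, not something stated elsewhere in this file).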
x = torch.cat([x, torch.ones(x.shape[0], 1).to(x)], dim=-1)
model = _run_falkon_fit(x, y, reg, ny_count, center_selector,
maxiters=cg_max_iters, stop_thresh=cg_stop_thresh,
kernel_type=kernel, variance=outer_layer_variance,
verbosity_level=verbosity_level, falkon_opts=custom_falkon_opts)
return model, tx
def eval_model_on_grid(model, bbox, tx, voxel_grid_size, cell_vox_min=None, cell_vox_max=None, print_message=True):
"""
Evaluate the trained model (output of fit_model_to_pointcloud) on a voxel grid.
:param model: The trained model returned from fit_model_to_pointcloud
:param bbox: The bounding box defining the region of space on which to evaluate the model
(represented as the pair (origin, size))
:param tx: An affine transformation which transforms points in world coordinates to model
coordinates before evaluating the model (the second return value of fit_model_to_grid).
The transformation is represented as a tuple (t, s) where t is a translation and s is scale.
:param voxel_grid_size: The size of the voxel grid on which to reconstruct
:param cell_vox_min: If not None, reconstruct on the subset of the voxel grid starting at these indices.
:param cell_vox_max: If not None, reconstruct on the subset of the voxel grid ending at these indices.
:param print_message: If true, print status messages to stdout.
:return: A tensor representing the model evaluated on a grid.
"""
bbox_origin, bbox_size = bbox
voxel_size = bbox_size / voxel_grid_size # size of a single voxel cell
if cell_vox_min is None:
cell_vox_min = torch.tensor([0, 0, 0], dtype=torch.int32)
if cell_vox_max is None:
cell_vox_max = voxel_grid_size
if print_message:
print(f"Evaluating model on grid of size {[_.item() for _ in (cell_vox_max - cell_vox_min)]}.")
eval_start_time = time.time()
xmin = bbox_origin + (cell_vox_min + 0.5) * voxel_size
xmax = bbox_origin + (cell_vox_max - 0.5) * voxel_size
xmin = affine_transform_pointcloud(xmin.unsqueeze(0), tx).squeeze()
xmax = affine_transform_pointcloud(xmax.unsqueeze(0), tx).squeeze()
xmin, xmax = xmin.numpy(), xmax.numpy()
cell_vox_size = (cell_vox_max - cell_vox_min).numpy()
xgrid = np.stack([_.ravel() for _ in np.mgrid[xmin[0]:xmax[0]:cell_vox_size[0] * 1j,
xmin[1]:xmax[1]:cell_vox_size[1] * 1j,
xmin[2]:xmax[2]:cell_vox_size[2] * 1j]], axis=-1)
xgrid = torch.from_numpy(xgrid).to(model.alpha_.dtype)
xgrid = torch.cat([xgrid, torch.ones(xgrid.shape[0], 1).to(xgrid)], dim=-1).to(model.alpha_.dtype)
    ygrid = model.predict(xgrid).reshape(tuple(cell_vox_size.astype(int))).detach().cpu()
if print_message:
print(f"Evaluated model in {time.time() - eval_start_time}s.")
return ygrid
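# End-to-end sketch of the public API in this module (the file name is hypothetical):
#   x, n = load_point_cloud("scan.ply")
#   model, tx = fit_model_to_pointcloud(x, n, num_ny=1024, eps=0.01)
#   bbox = point_cloud_bounding_box(x, scale=1.1)
#   grid = eval_model_on_grid(model, bbox, tx,
#                             voxel_grid_size=torch.tensor([128, 128, 128], dtype=torch.int32))
# The zero level set of `grid` can then be meshed with marching cubes, as done in fit.py.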
# HIWL | HIWL-main/scheme/model_vit.py
"""
original code from rwightman:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
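# e.g. with drop_prob = 0.1 each sample's residual branch survives with probability
# 0.9; surviving activations are scaled by 1/0.9 so the expected value matches
# evaluation mode, where drop_path is the identity.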
class DropPath(nn.Module):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class PatchEmbed(nn.Module):
"""
2D Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
# flatten: [B, C, H, W] -> [B, C, HW]
# transpose: [B, C, HW] -> [B, HW, C]
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
return x
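# e.g. a 224x224 input with 16x16 patches gives grid_size = (14, 14), i.e.
# num_patches = 196 tokens, each of dimension embed_dim.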
class Attention(nn.Module):
def __init__(self,
                 dim,   # dimension of the input tokens
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop_ratio=0.,
proj_drop_ratio=0.):
super(Attention, self).__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop_ratio)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop_ratio)
def forward(self, x):
# [batch_size, num_patches + 1, total_embed_dim]
B, N, C = x.shape
# qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim]
# reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head]
# permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head]
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
# [batch_size, num_heads, num_patches + 1, embed_dim_per_head]
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
# transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1]
# @: multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
# @: multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head]
# transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head]
# reshape: -> [batch_size, num_patches + 1, total_embed_dim]
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
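# e.g. for ViT-Base: C = 768 and num_heads = 12, so each head attends over
# embed_dim_per_head = 64 channels and self.scale = 64 ** -0.5 = 0.125.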
class Mlp(nn.Module):
"""
MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Block(nn.Module):
def __init__(self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_ratio=0.,
attn_drop_ratio=0.,
drop_path_ratio=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm):
super(Block, self).__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000,
embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True,
qk_scale=None, representation_size=None, distilled=False, drop_ratio=0.,
attn_drop_ratio=0., drop_path_ratio=0., embed_layer=PatchEmbed, norm_layer=None,
act_layer=None):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_c (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
            mlp_ratio (float): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
distilled (bool): model includes a distillation token and head as in DeiT models
drop_ratio (float): dropout rate
attn_drop_ratio (float): attention dropout rate
drop_path_ratio (float): stochastic depth rate
embed_layer (nn.Module): patch embedding layer
norm_layer: (nn.Module): normalization layer
"""
super(VisionTransformer, self).__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_tokens = 2 if distilled else 1
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
self.pos_drop = nn.Dropout(p=drop_ratio)
dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=dpr[i],
norm_layer=norm_layer, act_layer=act_layer)
for i in range(depth)
])
self.norm = norm_layer(embed_dim)
# Representation layer
if representation_size and not distilled:
self.has_logits = True
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
("fc", nn.Linear(embed_dim, representation_size)),
("act", nn.Tanh())
]))
else:
self.has_logits = False
self.pre_logits = nn.Identity()
# Classifier head(s)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
# Weight init
nn.init.trunc_normal_(self.pos_embed, std=0.02)
if self.dist_token is not None:
nn.init.trunc_normal_(self.dist_token, std=0.02)
nn.init.trunc_normal_(self.cls_token, std=0.02)
self.apply(_init_vit_weights)
def forward_features(self, x):
# [B, C, H, W] -> [B, num_patches, embed_dim]
x = self.patch_embed(x) # [B, 196, 768]
# [1, 1, 768] -> [B, 1, 768]
cls_token = self.cls_token.expand(x.shape[0], -1, -1)
if self.dist_token is None:
x = torch.cat((cls_token, x), dim=1) # [B, 197, 768]
else:
x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
x = self.pos_drop(x + self.pos_embed)
x = self.blocks(x)
x = self.norm(x)
if self.dist_token is None:
return self.pre_logits(x[:, 0])
else:
return x[:, 0], x[:, 1]
def forward(self, x):
x = self.forward_features(x)
if self.head_dist is not None:
x, x_dist = self.head(x[0]), self.head_dist(x[1])
if self.training and not torch.jit.is_scripting():
                # during training, return both classifier predictions (e.g. for a distillation loss)
                return x, x_dist
            else:
                # during inference, return the average of both classifier predictions
                return (x + x_dist) / 2
else:
x = self.head(x)
return x
def _init_vit_weights(m):
"""
ViT weight initialization
:param m: module
"""
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.zeros_(m.bias)
nn.init.ones_(m.weight)
def vit_base_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768 if has_logits else None,
num_classes=num_classes)
return model
def vit_base_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=32,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768 if has_logits else None,
num_classes=num_classes)
return model
def vit_large_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024 if has_logits else None,
num_classes=num_classes)
return model
def vit_large_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=32,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024 if has_logits else None,
num_classes=num_classes)
return model
def vit_huge_patch14_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=14,
embed_dim=1280,
depth=32,
num_heads=16,
representation_size=1280 if has_logits else None,
num_classes=num_classes)
return model
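# Usage sketch (hypothetical 5-class task):
#   model = vit_base_patch16_224_in21k(num_classes=5, has_logits=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape [1, 5]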
# HIWL | HIWL-main/scheme/model_googlenet.py
import torch.nn as nn
import torch
import torch.nn.functional as F
class GoogLeNet(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, init_weights=False):
super(GoogLeNet, self).__init__()
self.aux_logits = aux_logits
self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = BasicConv2d(64, 64, kernel_size=1)
self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
if self.aux_logits:
self.aux1 = InceptionAux(512, num_classes)
self.aux2 = InceptionAux(528, num_classes)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(0.4)
self.fc = nn.Linear(1024, num_classes)
if init_weights:
self._initialize_weights()
def forward(self, x):
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
        if self.training and self.aux_logits:  # the aux classifiers are only used in training mode
aux1 = self.aux1(x)
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
        if self.training and self.aux_logits:  # the aux classifiers are only used in training mode
aux2 = self.aux2(x)
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
        if self.training and self.aux_logits:  # the aux classifiers are only used in training mode
return x, aux2, aux1
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
class Inception(nn.Module):
def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
super(Inception, self).__init__()
self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
self.branch2 = nn.Sequential(
BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)  # padding keeps the output size equal to the input size
)
self.branch3 = nn.Sequential(
BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2)  # padding keeps the output size equal to the input size
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
BasicConv2d(in_channels, pool_proj, kernel_size=1)
)
def forward(self, x):
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
outputs = [branch1, branch2, branch3, branch4]
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.averagePool = nn.AvgPool2d(kernel_size=5, stride=3)
self.conv = BasicConv2d(in_channels, 128, kernel_size=1) # output[batch, 128, 4, 4]
self.fc1 = nn.Linear(2048, 1024)
self.fc2 = nn.Linear(1024, num_classes)
def forward(self, x):
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = self.averagePool(x)
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
x = F.dropout(x, 0.5, training=self.training)
# N x 2048
x = F.relu(self.fc1(x), inplace=True)
x = F.dropout(x, 0.5, training=self.training)
# N x 1024
x = self.fc2(x)
# N x num_classes
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
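if __name__ == "__main__":
    # Minimal usage sketch (an illustrative assumption, not part of the original
    # pipeline): with aux_logits=True and the module in train() mode, the forward
    # pass returns (main, aux2, aux1), matching how train_googlenet_one_epoch in
    # utils.py unpacks the outputs; in eval() mode only the main logits return.
    net = GoogLeNet(num_classes=5, aux_logits=True, init_weights=True)
    net.train()
    main_out, aux2, aux1 = net(torch.rand(2, 3, 224, 224))
    print(main_out.shape, aux2.shape, aux1.shape)  # each: torch.Size([2, 5])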
| 5,919 | 33.219653 | 92 | py |
HIWL | HIWL-main/scheme/model_vgg.py | import torch.nn as nn
import torch
# official pretrain weights
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=False):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512*7*7, 4096),
nn.ReLU(True),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(p=0.5),
nn.Linear(4096, num_classes)
)
if init_weights:
self._initialize_weights()
def forward(self, x):
# N x 3 x 224 x 224
x = self.features(x)
# N x 512 x 7 x 7
x = torch.flatten(x, start_dim=1)
# N x 512*7*7
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
# nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_features(cfg: list):
layers = []
in_channels = 3
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg(model_name="vgg16", **kwargs):
    assert model_name in cfgs, "Warning: model name {} not in cfgs dict!".format(model_name)
cfg = cfgs[model_name]
model = VGG(make_features(cfg), **kwargs)
return model
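if __name__ == "__main__":
    # Minimal usage sketch (an illustrative assumption): instantiate vgg16 for a
    # 5-class problem and check the output shape; the classifier expects the
    # 512x7x7 feature map produced by 224x224 inputs.
    net = vgg(model_name="vgg16", num_classes=5, init_weights=True)
    out = net(torch.rand(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 5])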
| 2,616 | 32.551282 | 117 | py |
HIWL | HIWL-main/scheme/model_resnet.py | import torch.nn as nn
import torch
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channel)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channel)
self.downsample = downsample
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_channel, out_channel, stride=1, downsample=None,
groups=1, width_per_group=64):
super(Bottleneck, self).__init__()
width = int(out_channel * (width_per_group / 64.)) * groups
self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width,
kernel_size=1, stride=1, bias=False) # squeeze channels
self.bn1 = nn.BatchNorm2d(width)
# -----------------------------------------
self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, groups=groups,
kernel_size=3, stride=stride, bias=False, padding=1)
self.bn2 = nn.BatchNorm2d(width)
# -----------------------------------------
self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel*self.expansion,
kernel_size=1, stride=1, bias=False) # unsqueeze channels
self.bn3 = nn.BatchNorm2d(out_channel*self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
blocks_num,
num_classes=1000,
include_top=True,
groups=1,
width_per_group=64):
super(ResNet, self).__init__()
self.include_top = include_top
self.in_channel = 64
self.groups = groups
self.width_per_group = width_per_group
self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,
padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(self.in_channel)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, blocks_num[0])
self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)
self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)
self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)
if self.include_top:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # output size = (1, 1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def _make_layer(self, block, channel, block_num, stride=1):
downsample = None
if stride != 1 or self.in_channel != channel * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(channel * block.expansion))
layers = []
layers.append(block(self.in_channel,
channel,
downsample=downsample,
stride=stride,
groups=self.groups,
width_per_group=self.width_per_group))
self.in_channel = channel * block.expansion
for _ in range(1, block_num):
layers.append(block(self.in_channel,
channel,
groups=self.groups,
width_per_group=self.width_per_group))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.include_top:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def resnet34(num_classes=1000, include_top=True):
return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
if __name__ == "__main__":
    # torchsummary debug printout, guarded so that importing this module
    # (as the training scripts do) no longer builds a CUDA model at import time
    from torchsummary import summary
    summary(resnet34().cuda(), (3, 64, 64))
def resnet50(num_classes=1000, include_top=True):
return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
def resnet101(num_classes=1000, include_top=True):
return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top)
def resnext50_32x4d(num_classes=1000, include_top=True):
groups = 32
width_per_group = 4
return ResNet(Bottleneck, [3, 4, 6, 3],
num_classes=num_classes,
include_top=include_top,
groups=groups,
width_per_group=width_per_group)
def resnext101_32x8d(num_classes=1000, include_top=True):
groups = 32
width_per_group = 8
return ResNet(Bottleneck, [3, 4, 23, 3],
num_classes=num_classes,
include_top=include_top,
groups=groups,
width_per_group=width_per_group)
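if __name__ == "__main__":
    # Minimal CPU usage sketch (an illustrative assumption): thanks to the
    # adaptive average pool, the same network accepts the 64x64 crops used by
    # train_resnet26.py as well as 224x224 inputs.
    net = resnet50(num_classes=5)
    out = net(torch.rand(1, 3, 64, 64))
    print(out.shape)  # expected: torch.Size([1, 5])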
| 6,513 | 33.465608 | 112 | py |
HIWL | HIWL-main/scheme/train_resnet26.py | import os
import math
import argparse
import sys
import copy
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_resnet26 import resnet26 as create_model
from my_dataset import MyDataSet, returnDataset
from utils import read_split_data, train_one_epoch, evaluateall, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/resnet26', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'a')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
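# Training flow implemented by main() below:
#   1) train the 4-class (coarse) model with early stopping, keeping the top-3
#      checkpoints by validation accuracy;
#   2) train the 2-class (fine) model the same way;
#   3) grid-search all 3x3 (model2, model4) combinations with evaluateall on the
#      5-class validation set, then report the best pair's accuracy on the test set.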
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
num_model='resnet26'
print(args)
print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/')
tb_writer2 = SummaryWriter('classes2/{}'.format(num_model))
tb_writer4 = SummaryWriter('classes4/{}'.format(num_model))
if os.path.exists("./weights/resnet26") is False:
os.makedirs("./weights/resnet26")
if os.path.exists("./log") is False:
os.makedirs("./log")
if os.path.exists("./predicts/resnet26") is False:
os.makedirs("./predicts/resnet26")
sys.stdout = Logger(stream=sys.stdout)
    # class labels 0,1,2,3,4 correspond to: intermediate, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(
args.data_path5)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25 )),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.Resize((64, 64)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.Resize((64, 64)),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(
data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5,
test_images_label5)
    # weighted sampling: weight each sample by the inverse frequency of its class,
    # so WeightedRandomSampler draws a roughly class-balanced stream
    # (e.g. class counts [900, 100] -> per-sample weights [1/900, 1/100])
target = train_dataset2.images_class
class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))
    # just hand the sampler to the DataLoader below
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
print('Using {} dataloader workers every process'.format(nw))
val_loader5 = torch.utils.data.DataLoader(val_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
test_loader5 = torch.utils.data.DataLoader(test_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader4 = torch.utils.data.DataLoader(train_dataset4,
batch_size=batch_size,
pin_memory=True,
shuffle=True,
num_workers=nw)
val_loader4 = torch.utils.data.DataLoader(val_dataset4,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader2 = torch.utils.data.DataLoader(train_dataset2,
batch_size=batch_size,
pin_memory=False,
shuffle=False,
sampler=sampler,
num_workers=nw)
val_loader2 = torch.utils.data.DataLoader(val_dataset2,
batch_size=batch_size,
shuffle=False,
pin_memory=False,
num_workers=nw)
    # load pretrained weights if a checkpoint path was given
model4 = create_model(num_classes=args.num_classes4).to(device)
model2 = create_model(num_classes=args.num_classes2).to(device)
    tags = ["loss", "accuracy", "learning_rate"]  # metrics logged to TensorBoard
    # load weights for the 4-class model
if args.weights4 != "":
if os.path.exists(args.weights4):
weights4_dict = torch.load(args.weights4, map_location=device)
load_weights4_dict = {k: v for k, v in weights4_dict.items()
if model4.state_dict()[k].numel() == v.numel()}
print(model4.load_state_dict(load_weights4_dict, strict=False))
else:
raise FileNotFoundError("not found weights4 file: {}".format(args.weights4))
pg4 = [p for p in model4.parameters() if p.requires_grad]
# optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4)
optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4)
scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4, T_max=150, eta_min=0)
# lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine
# scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4)
    # load weights for the 2-class model
if args.weights2 != "":
if os.path.exists(args.weights2):
weights2_dict = torch.load(args.weights2, map_location=device)
load_weights2_dict = {k: v for k, v in weights2_dict.items()
if model2.state_dict()[k].numel() == v.numel()}
print(model2.load_state_dict(load_weights2_dict, strict=False))
else:
raise FileNotFoundError("not found weights2 file: {}".format(args.weights2))
pg2 = [p for p in model2.parameters() if p.requires_grad]
# optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4)
optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4)
scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0)
# lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine
# scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2)
    best_acc_4 = [0, 0, 0]  # top-3 validation accuracies
best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)]
best_acc_2 = [0, 0, 0]
best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)]
acc_combine_best = 0
acc_combine_best_index = 0
    patience = 25  # early stopping after 25 epochs without improvement on the validation set
early_stopping4 = EarlyStopping(patience, verbose=True)
early_stopping2 = EarlyStopping(patience, verbose=True)
for epoch in range(1000):
mean_loss4 = train_one_epoch('model4', model=model4,
optimizer=optimizer4,
data_loader=train_loader4,
device=device,
epoch=epoch)
scheduler4.step()
# validate
acc_4, val_loss_4 = evaluate(model=model4,
data_loader=val_loader4,
device=device)
if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]:
best_acc_4[2] = acc_4
best_model4[2] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model))
elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = acc_4
best_model4[1] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model))
elif acc_4 > best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = best_acc_4[0]
best_model4[1] = best_model4[0]
best_acc_4[0] = acc_4
best_model4[0] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model))
tb_writer4.add_scalar(tags[0], mean_loss4, epoch)
tb_writer4.add_scalar(tags[1], acc_4, epoch)
tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch)
print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0]))
early_stopping4(val_loss_4, acc_4, model4)
if early_stopping4.early_stop:
print("epoch = {}".format(epoch))
break
for epoch in range(1000):
mean_loss2 = train_one_epoch('model2', model=model2,
optimizer=optimizer2,
data_loader=train_loader2,
device=device,
epoch=epoch)
scheduler2.step()
acc_2, val_loss_2 = evaluate(model=model2,
data_loader=val_loader2,
device=device)
if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]:
best_acc_2[2] = acc_2
best_model2[2] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model))
elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = acc_2
best_model2[1] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model))
elif acc_2 > best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = best_acc_2[0]
best_model2[1] = best_model2[0]
best_acc_2[0] = acc_2
best_model2[0] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model))
tb_writer2.add_scalar(tags[0], mean_loss2, epoch)
tb_writer2.add_scalar(tags[1], acc_2, epoch)
tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch)
print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0]))
early_stopping2(val_loss_2, acc_2, model2)
if early_stopping2.early_stop:
print("epoch = {}".format(epoch))
break
    # evaluate every (model2, model4) pair of the combined two-stage classifier
for i in range(len(best_model2)):
for j in range(len(best_model4)):
acc_combine, pred_all = evaluateall(
model2=best_model2[i],
model4=best_model4[j],
test_loader5=val_loader5,
device=device)
torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j))
if acc_combine_best < acc_combine:
acc_combine_best = acc_combine
acc_combine_best_index = (i, j)
test_acc, test_pred_all = evaluateall(
model2=best_model2[acc_combine_best_index[0]],
model4=best_model4[acc_combine_best_index[1]],
test_loader5=test_loader5,
device=device)
torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model))
print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best,
acc_combine_best_index, test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path5', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--batch-size', type=int, default=48)
parser.add_argument('--num_classes4', type=int, default=4)
parser.add_argument('--epochs4', type=int, default=100)
parser.add_argument('--lr4', type=float, default=0.01)
parser.add_argument('--lrf4', type=float, default=0.01)
parser.add_argument('--weights4', type=str, default='',
help='initial weights4 path')
parser.add_argument('--num_classes2', type=int, default=2)
parser.add_argument('--epochs2', type=int, default=150)
parser.add_argument('--lr2', type=float, default=0.01)
parser.add_argument('--lrf2', type=float, default=0.01)
parser.add_argument('--weights2', type=str, default='',
help='initial weights2 path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 14,240 | 47.770548 | 138 | py |
HIWL | HIWL-main/scheme/train_resnet.py | import os
import math
import argparse
import sys
import copy
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_resnet import resnet34, resnet50, resnet101
from my_dataset import MyDataSet, returnDataset
from utils import read_split_data, train_one_epoch, evaluateall, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/resnet', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
print(args)
print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/')
num_model = args.num_model
if num_model == 'resnet34':
create_model = resnet34
if num_model == 'resnet50':
create_model = resnet50
if num_model == 'resnet101':
create_model = resnet101
tb_writer2 = SummaryWriter('classes2/{}'.format(num_model))
tb_writer4 = SummaryWriter('classes4/{}'.format(num_model))
if os.path.exists("./weights/{}".format(num_model)) is False:
os.makedirs("./weights/{}".format(num_model))
if os.path.exists("./log") is False:
os.makedirs("./log")
if os.path.exists("./predicts/{}".format(num_model)) is False:
os.makedirs("./predicts/{}".format(num_model))
sys.stdout = Logger(filename='./log/{}'.format(num_model), stream=sys.stdout)
    # class labels 0,1,2,3,4 correspond to: intermediate, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(
args.data_path5)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25 )),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(
data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5,
test_images_label5)
    # weighted sampling: weight each sample by the inverse frequency of its class,
    # so WeightedRandomSampler draws a roughly class-balanced stream
target = train_dataset2.images_class
class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))
    # just hand the sampler to the DataLoader below
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
print('Using {} dataloader workers every process'.format(nw))
val_loader5 = torch.utils.data.DataLoader(val_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
test_loader5 = torch.utils.data.DataLoader(test_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader4 = torch.utils.data.DataLoader(train_dataset4,
batch_size=batch_size,
pin_memory=True,
shuffle=True,
num_workers=nw)
val_loader4 = torch.utils.data.DataLoader(val_dataset4,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader2 = torch.utils.data.DataLoader(train_dataset2,
batch_size=batch_size,
pin_memory=False,
shuffle=False,
sampler=sampler,
num_workers=nw)
val_loader2 = torch.utils.data.DataLoader(val_dataset2,
batch_size=batch_size,
shuffle=False,
pin_memory=False,
num_workers=nw)
    # load pretrained weights if a checkpoint path was given
model4 = create_model(num_classes=args.num_classes4).to(device)
model2 = create_model(num_classes=args.num_classes2).to(device)
    tags = ["loss", "accuracy", "learning_rate"]  # metrics logged to TensorBoard
    # load weights for the 4-class model
if args.weights4 != "":
if os.path.exists(args.weights4):
weights4_dict = torch.load(args.weights4, map_location=device)
load_weights4_dict = {k: v for k, v in weights4_dict.items()
if model4.state_dict()[k].numel() == v.numel()}
print(model4.load_state_dict(load_weights4_dict, strict=False))
else:
raise FileNotFoundError("not found weights4 file: {}".format(args.weights4))
pg4 = [p for p in model4.parameters() if p.requires_grad]
optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4)
scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0)
    # load weights for the 2-class model
if args.weights2 != "":
if os.path.exists(args.weights2):
weights2_dict = torch.load(args.weights2, map_location=device)
load_weights2_dict = {k: v for k, v in weights2_dict.items()
if model2.state_dict()[k].numel() == v.numel()}
print(model2.load_state_dict(load_weights2_dict, strict=False))
else:
raise FileNotFoundError("not found weights2 file: {}".format(args.weights2))
pg2 = [p for p in model2.parameters() if p.requires_grad]
optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4)
scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0)
    best_acc_4 = [0, 0, 0]  # top-3 validation accuracies
best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)]
best_acc_2 = [0, 0, 0]
best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)]
acc_combine_best = 0
acc_combine_best_index = 0
    patience = 25  # early stopping after 25 epochs without improvement on the validation set
early_stopping4 = EarlyStopping(patience , verbose=True)
early_stopping2 = EarlyStopping(patience , verbose=True)
for epoch in range(1000):
mean_loss4 = train_one_epoch('model4', model=model4,
optimizer=optimizer4,
data_loader=train_loader4,
device=device,
epoch=epoch)
scheduler4.step()
# validate
acc_4, val_loss_4 = evaluate(model=model4,
data_loader=val_loader4,
device=device)
if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]:
best_acc_4[2] = acc_4
best_model4[2] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model))
elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = acc_4
best_model4[1] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model))
elif acc_4 > best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = best_acc_4[0]
best_model4[1] = best_model4[0]
best_acc_4[0] = acc_4
best_model4[0] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model))
tb_writer4.add_scalar(tags[0], mean_loss4, epoch)
tb_writer4.add_scalar(tags[1], acc_4, epoch)
tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch)
print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0]))
early_stopping4(val_loss_4, acc_4, model4)
if early_stopping4.early_stop:
print("epoch = {}".format(epoch))
break
for epoch in range(1000):
mean_loss2 = train_one_epoch('model2', model=model2,
optimizer=optimizer2,
data_loader=train_loader2,
device=device,
epoch=epoch)
scheduler2.step()
acc_2, val_loss_2 = evaluate(model=model2,
data_loader=val_loader2,
device=device)
if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]:
best_acc_2[2] = acc_2
best_model2[2] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model))
elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = acc_2
best_model2[1] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model))
elif acc_2 > best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = best_acc_2[0]
best_model2[1] = best_model2[0]
best_acc_2[0] = acc_2
best_model2[0] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model))
tb_writer2.add_scalar(tags[0], mean_loss2, epoch)
tb_writer2.add_scalar(tags[1], acc_2, epoch)
tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch)
print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0]))
early_stopping2(val_loss_2, acc_2, model2)
if early_stopping2.early_stop:
print("epoch = {}".format(epoch))
break
    # evaluate every (model2, model4) pair of the combined two-stage classifier
for i in range(len(best_model2)):
for j in range(len(best_model4)):
acc_combine, pred_all = evaluateall(
model2=best_model2[i],
model4=best_model4[j],
test_loader5=val_loader5,
device=device)
torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j))
if acc_combine_best < acc_combine:
acc_combine_best = acc_combine
acc_combine_best_index = (i, j)
test_acc, test_pred_all = evaluateall(
model2=best_model2[acc_combine_best_index[0]],
model4=best_model4[acc_combine_best_index[1]],
test_loader5=test_loader5,
device=device)
torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model))
print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path5', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--num_classes4', type=int, default=4)
parser.add_argument('--epochs4', type=int, default=100)
parser.add_argument('--lr4', type=float, default=0.01)
parser.add_argument('--lrf4', type=float, default=0.01)
parser.add_argument('--weights4', type=str, default=r"",
help='initial weights4 path')
parser.add_argument('--num_classes2', type=int, default=2)
parser.add_argument('--epochs2', type=int, default=150)
parser.add_argument('--lr2', type=float, default=0.01)
parser.add_argument('--lrf2', type=float, default=0.01)
parser.add_argument('--weights2', type=str, default=r"",
help='initial weights2 path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
parser.add_argument('--num-model', default='resnet101', help='resnet34-101')
opt = parser.parse_args()
main(opt)
| 13,921 | 45.875421 | 138 | py |
HIWL | HIWL-main/scheme/utils.py | import os
import sys
import json
import pickle
import random
import numpy as np
import torch
from tqdm import tqdm
import copy
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
# cross-entropy loss with label smoothing built in
class SMLoss(nn.Module):
''' Cross Entropy Loss with label smoothing '''
def __init__(self, label_smooth=None, class_num=137):
super().__init__()
self.label_smooth = label_smooth
self.class_num = class_num
def forward(self, pred, target):
'''
Args:
            pred: model output logits of shape [N, M]
            target: ground-truth class indices of shape [N]
'''
eps = 1e-12
if self.label_smooth is not None:
# cross entropy loss with label smoothing
logprobs = F.log_softmax(pred, dim=1) # softmax + log
            target = F.one_hot(target, self.class_num)  # convert labels to one-hot
            # smoothing: clamp the one-hot target so every class receives at least
            # label_smooth / (class_num - 1) probability mass
            target = torch.clamp(target.float(), min=self.label_smooth / (self.class_num - 1),
                                 max=1.0 - self.label_smooth)
loss = -1 * torch.sum(target * logprobs, 1)
else:
# standard cross entropy loss
loss = -1. * pred.gather(1, target.unsqueeze(-1)) + torch.log(torch.exp(pred + eps).sum(dim=1))
return loss.mean()
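# Expected on-disk layout for read_split_data below (illustrative example only;
# the real folder names come from whatever --data-path5 points at):
#
#   root/
#       round/    img001.jpg, img002.png, ...
#       spiral/   ...
#       edge-on/  ...
#
# Each immediate sub-folder of root is one class; its images are split into
# train / val / test according to val_rate and test_rate.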
def read_split_data(root: str, val_rate: float = 0.1, test_rate: float = 0.1):
split_rate = val_rate + test_rate
    random.seed(0)  # fixed seed so the split is reproducible
assert os.path.exists(root), "dataset root: {} does not exist.".format(root)
    # each immediate sub-folder of root is treated as one class
    galaxy_class = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))]
    split_galaxy_class = []  # class labels after splitting (one _train and one _test entry per class)
for i in galaxy_class:
split_galaxy_class.append(i + '_train')
split_galaxy_class.append(i + '_test')
    # sort so the class order is deterministic
    galaxy_class.sort()
    # map each class name to a numeric index
class_indices = dict((k, v) for v, k in enumerate(galaxy_class))
json_str = json.dumps(dict((val, key) for key, val in class_indices.items()), indent=4)
with open('class_indices.json', 'w') as json_file:
json_file.write(json_str)
    train_images_path = []   # paths of all training images
    train_images_label = []  # class indices of the training images
    val_images_path = []     # paths of all validation images
    val_images_label = []    # class indices of the validation images
    test_images_path = []    # paths of all test images
    test_images_label = []   # class indices of the test images
    every_class_num = []     # total sample count per class
    split_every_class_num = []  # per-class sample counts after splitting
    supported = [".jpg", ".JPG", ".png", ".PNG"]  # supported file extensions
    # iterate over the files in each class folder
for cla in galaxy_class:
cla_path = os.path.join(root, cla)
sample_count = 0
        # collect all file paths with a supported extension
images = [os.path.join(root, cla, i) for i in os.listdir(cla_path)
if os.path.splitext(i)[-1] in supported]
        # numeric index for this class
image_class = class_indices[cla]
        # record the number of samples in this class
every_class_num.append(len(images))
split_path = random.sample(images, round(len(images) * split_rate))
for img_path in images:
            if img_path in split_path:  # sampled paths go to the val/test splits
sample_count += 1
if sample_count <= len(split_path)*(val_rate/split_rate):
val_images_path.append(img_path)
val_images_label.append(image_class)
else:
test_images_path.append(img_path)
test_images_label.append(image_class)
            else:  # everything else goes to the training set
train_images_path.append(img_path)
train_images_label.append(image_class)
print("{} images were found in the dataset.".format(sum(every_class_num)))
print("{} images for training.".format(len(train_images_path)))
print("{} images for val.".format(len(val_images_path)))
print("{} images for test.".format(len(test_images_path)))
plot_image = False
if plot_image:
        # bar chart of per-class sample counts
        plt.bar(range(len(split_every_class_num)), split_every_class_num, align='center')
        # replace the numeric x ticks with the split class names
        plt.xticks(range(len(split_every_class_num)), split_galaxy_class)
        # annotate each bar with its count
        for i, v in enumerate(split_every_class_num):
            plt.text(x=i, y=v + 5, s=str(v), ha='center')
        # axis labels
        plt.xlabel('image class')
        plt.ylabel('number of images')
        # chart title
plt.title('galaxy class distribution')
plt.show()
return train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label
def plot_data_loader_image(data_loader):
batch_size = data_loader.batch_size
plot_num = min(batch_size, 4)
json_path = './class_indices.json'
assert os.path.exists(json_path), json_path + " does not exist."
json_file = open(json_path, 'r')
class_indices = json.load(json_file)
for data in data_loader:
images, labels = data
for i in range(plot_num):
# [C, H, W] -> [H, W, C]
img = images[i].numpy().transpose(1, 2, 0)
            # undo the Normalize transform
img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255
label = labels[i].item()
plt.subplot(1, plot_num, i+1)
plt.xlabel(class_indices[str(label)])
            plt.xticks([])  # hide x-axis ticks
            plt.yticks([])  # hide y-axis ticks
plt.imshow(img.astype('uint8'))
plt.show()
def write_pickle(list_info: list, file_name: str):
with open(file_name, 'wb') as f:
pickle.dump(list_info, f)
def read_pickle(file_name: str) -> list:
with open(file_name, 'rb') as f:
info_list = pickle.load(f)
return info_list
def train_one_epoch(tag, model, optimizer, data_loader, device, epoch):
model.train()
loss_function = torch.nn.CrossEntropyLoss()
mean_loss = torch.zeros(1).to(device)
optimizer.zero_grad()
data_loader = tqdm(data_loader)
for step, data in enumerate(data_loader):
images, targets = data
pred = model(images.to(device))
loss = loss_function(pred, targets.to(device))
loss.backward()
mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses
        data_loader.desc = "[epoch {}] {}-meanloss: {}".format(epoch, tag, round(mean_loss.item(), 3))
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss)
sys.exit(1)
optimizer.step()
optimizer.zero_grad()
print('traloss: {}'.format(mean_loss.item()))
return mean_loss.item()
def train_googlenet_one_epoch(tag, model, optimizer, data_loader, device, epoch):
model.train()
    # loss_function = torch.nn.CrossEntropyLoss()
    # label smoothing; the class count is parsed from the tag ('model4' -> 4, 'model2' -> 2)
    loss_function = SMLoss(label_smooth=0.05, class_num=int(tag.split('l')[1]))
mean_loss = torch.zeros(1).to(device)
optimizer.zero_grad()
data_loader = tqdm(data_loader)
for step, data in enumerate(data_loader):
images, labels = data
logits, aux_logits2, aux_logits1 = model(images.to(device))
loss0 = loss_function(logits, labels.to(device))
loss1 = loss_function(aux_logits1, labels.to(device))
loss2 = loss_function(aux_logits2, labels.to(device))
loss = loss0 + loss1 * 0.3 + loss2 * 0.3
loss.backward()
mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses
data_loader.desc = "[epoch {}] mean loss {}".format(epoch, round(mean_loss.item(), 3))
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss)
sys.exit(1)
optimizer.step()
optimizer.zero_grad()
print('traloss: {}'.format(mean_loss.item()))
return mean_loss.item()
@torch.no_grad()
def evaluateall(model2, model4,test_loader5, device):
model2.eval()
model4.eval()
    classes2 = [1, 2]        # fine labels resolved by the binary model
    classes4 = [0, 1, 3, 4]  # coarse labels; "1" here is a merged bucket covering {1, 2}
    # total number of evaluation samples
    total_num = len(test_loader5.dataset)
    correct_num = torch.tensor([0]).to(device)  # running count of correct predictions
    pred_all = torch.tensor([]).to(device)  # predictions concatenated over all batches
for step, data in enumerate(tqdm(test_loader5)):
images5, labels5= data
pred4 = model4(images5.to(device))
        pred4 = torch.max(pred4, dim=1)[1]  # argmax over the 4 coarse classes
        preddata = copy.deepcopy(pred4)  # this batch's coarse predictions
        preddata = torch.tensor([classes4[i] for i in preddata.tolist()])  # map indices to class labels
        select_index2 = torch.where(preddata == 1)  # samples that fell into the merged bucket
        if len(select_index2[0]) != 0:
            select_images2 = images5[select_index2]  # route these images to the binary model
pred2=model2(select_images2.to(device))
pred2=torch.max(pred2,dim=1)[1]
pred2 = torch.tensor([classes2[i] for i in pred2.tolist()])
for i,j in enumerate(select_index2[0].tolist()):
preddata[j]=pred2[i]
correct_num += torch.eq(preddata.to(device), labels5.to(device)).sum()
pred_all = torch.cat([pred_all, preddata.to(device)], dim=0)
acc_combine = correct_num.item() / total_num
return acc_combine,pred_all
@torch.no_grad()
def evaluate(model, data_loader, device):
model.eval()
    # total number of evaluation samples
total_num = len(data_loader.dataset)
    # running count of correctly predicted samples
loss_function = torch.nn.CrossEntropyLoss()
sum_num = torch.zeros(1).to(device)
data_loader = tqdm(data_loader)
mean_loss = torch.zeros(1).to(device)
for step, data in enumerate(data_loader):
images, labels = data
pred = model(images.to(device))
loss = loss_function(pred, labels.to(device))
pred_label = torch.max(pred, dim=1)[1]
sum_num += torch.eq(pred_label, labels.to(device)).sum()
mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses
acc = sum_num.item() / total_num
return acc, mean_loss.item()
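if __name__ == "__main__":
    # Minimal sanity check (an illustrative assumption, not part of the training
    # pipeline): on a confident prediction, label smoothing should give a
    # slightly higher loss than plain cross entropy, because some target mass
    # is moved onto the non-ground-truth classes.
    pred = torch.tensor([[4.0, 0.1, 0.1, 0.1, 0.1]])
    target = torch.tensor([0])
    smooth = SMLoss(label_smooth=0.05, class_num=5)(pred, target)
    plain = torch.nn.CrossEntropyLoss()(pred, target)
    print(float(smooth), float(plain))  # smoothed loss > plain CE loss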
| 9,877 | 34.789855 | 120 | py |
HIWL | HIWL-main/scheme/B1_nol.py | import os
import math
import argparse
import sys
import copy
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_efficientnet import efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3  # b0-b3 are selected by --num-model below
from my_dataset import MyDataSet, returnDataset
from utils import read_split_data, train_one_epoch, evaluateall, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/b1', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
img_size = {"B0": 224,
"B1-nol": 240,
"B2": 260,
"B3": 300,
"B4": 380,
"B5": 456,
"B6": 528,
"B7": 600}
num_model = args.num_model
print(args)
print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/')
tb_writer2 = SummaryWriter('classes2/{}'.format(num_model))
tb_writer4 = SummaryWriter('classes4/{}'.format(num_model))
if num_model == 'B0':
create_model = efficientnet_b0
if num_model == 'B1-nol':
create_model = efficientnet_b1
if num_model == 'B2':
create_model = efficientnet_b2
if num_model == 'B3':
create_model = efficientnet_b3
if os.path.exists("./weights/{}".format(num_model)) is False:
os.makedirs("./weights/{}".format(num_model))
if os.path.exists("./log") is False:
os.makedirs("./log")
if os.path.exists("./predicts/{}".format(num_model)) is False:
os.makedirs("./predicts/{}".format(num_model))
sys.stdout = Logger(filename='./log/efficientnet-{}'.format(num_model), stream=sys.stdout)
    # class labels 0,1,2,3,4 correspond to: intermediate, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25 )),
transforms.RandomResizedCrop(img_size[num_model], scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(img_size[num_model]),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5)
    # weighted sampling: weight each sample by the inverse frequency of its class,
    # so WeightedRandomSampler draws a roughly class-balanced stream
target = train_dataset2.images_class
class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))
    # just hand the sampler to the DataLoader below
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
print('Using {} dataloader workers every process'.format(nw))
val_loader5 = torch.utils.data.DataLoader(val_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
test_loader5 = torch.utils.data.DataLoader(test_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader4 = torch.utils.data.DataLoader(train_dataset4,
batch_size=batch_size,
pin_memory=True,
shuffle=True,
num_workers=nw)
val_loader4 = torch.utils.data.DataLoader(val_dataset4,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader2 = torch.utils.data.DataLoader(train_dataset2,
batch_size=batch_size,
pin_memory=False,
shuffle=False,
sampler=sampler,
num_workers=nw)
val_loader2 = torch.utils.data.DataLoader(val_dataset2,
batch_size=batch_size,
shuffle=False,
pin_memory=False,
num_workers=nw)
    # load pretrained weights if a checkpoint path was given
model4 = create_model(num_classes=args.num_classes4).to(device)
model2 = create_model(num_classes=args.num_classes2).to(device)
    tags = ["loss", "accuracy", "learning_rate"]  # metrics logged to TensorBoard
    # load weights for the 4-class model
if args.weights4 != "":
if os.path.exists(args.weights4):
weights4_dict = torch.load(args.weights4, map_location=device)
load_weights4_dict = {k: v for k, v in weights4_dict.items()
if model4.state_dict()[k].numel() == v.numel()}
print(model4.load_state_dict(load_weights4_dict, strict=False))
else:
raise FileNotFoundError("not found weights4 file: {}".format(args.weights4))
pg4 = [p for p in model4.parameters() if p.requires_grad]
# optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4)
optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4)
scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0)
# lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine
# scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4)
    # load weights for the 2-class model
if args.weights2 != "":
if os.path.exists(args.weights2):
weights2_dict = torch.load(args.weights2, map_location=device)
load_weights2_dict = {k: v for k, v in weights2_dict.items()
if model2.state_dict()[k].numel() == v.numel()}
print(model2.load_state_dict(load_weights2_dict, strict=False))
else:
raise FileNotFoundError("not found weights2 file: {}".format(args.weights2))
pg2 = [p for p in model2.parameters() if p.requires_grad]
# optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4)
optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4)
scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0)
# lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine
# scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2)
    best_acc_4 = [0, 0, 0]  # top-3 validation accuracies
best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)]
best_acc_2 = [0, 0, 0]
best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)]
acc_combine_best = 0
acc_combine_best_index = 0
    patience = 25  # early stopping after 25 epochs without improvement on the validation set
early_stopping4 = EarlyStopping(patience, verbose=True)
early_stopping2 = EarlyStopping(patience, verbose=True)
for epoch in range(1000):
mean_loss4 = train_one_epoch('model4', model=model4,
optimizer=optimizer4,
data_loader=train_loader4,
device=device,
epoch=epoch)
scheduler4.step()
# validate
acc_4, val_loss_4 = evaluate(model=model4,
data_loader=val_loader4,
device=device)
if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]:
best_acc_4[2] = acc_4
best_model4[2] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model))
elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = acc_4
best_model4[1] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model))
elif acc_4 > best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = best_acc_4[0]
best_model4[1] = best_model4[0]
best_acc_4[0] = acc_4
best_model4[0] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model))
tb_writer4.add_scalar(tags[0], mean_loss4, epoch)
tb_writer4.add_scalar(tags[1], acc_4, epoch)
tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch)
print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0]))
early_stopping4(val_loss_4, acc_4, model4)
if early_stopping4.early_stop:
break
for epoch in range(1000):
mean_loss2 = train_one_epoch('model2', model=model2,
optimizer=optimizer2,
data_loader=train_loader2,
device=device,
epoch=epoch)
scheduler2.step()
acc_2, val_loss_2 = evaluate(model=model2,
data_loader=val_loader2,
device=device)
if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]:
best_acc_2[2] = acc_2
best_model2[2] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model))
elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = acc_2
best_model2[1] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model))
elif acc_2 > best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = best_acc_2[0]
best_model2[1] = best_model2[0]
best_acc_2[0] = acc_2
best_model2[0] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model))
tb_writer2.add_scalar(tags[0], mean_loss2, epoch)
tb_writer2.add_scalar(tags[1], acc_2, epoch)
tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch)
print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0]))
early_stopping2(val_loss_2, acc_2, model2)
if early_stopping2.early_stop:
print("epoch = {}".format(epoch))
break
    # evaluate every (model2, model4) pair of the combined two-stage classifier
for i in range(len(best_model2)):
for j in range(len(best_model4)):
acc_combine, pred_all = evaluateall(
model2=best_model2[i],
model4=best_model4[j],
test_loader5=val_loader5,
device=device)
torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j))
if acc_combine_best < acc_combine:
acc_combine_best = acc_combine
acc_combine_best_index = (i, j)
test_acc, test_pred_all = evaluateall(
model2=best_model2[acc_combine_best_index[0]],
model4=best_model4[acc_combine_best_index[1]],
test_loader5=test_loader5,
device=device)
torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model))
print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path5', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--num_classes4', type=int, default=4)
parser.add_argument('--epochs4', type=int, default=100)
parser.add_argument('--lr4', type=float, default=0.005)
parser.add_argument('--lrf4', type=float, default=0.05)
parser.add_argument('--weights4', type=str, default='F:/pretrain pth/efficientnetb1.pth',
help='initial weights4 path')
parser.add_argument('--num_classes2', type=int, default=2)
parser.add_argument('--epochs2', type=int, default=150)
parser.add_argument('--lr2', type=float, default=0.005)
parser.add_argument('--lrf2', type=float, default=0.01)
parser.add_argument('--weights2', type=str, default='F:/pretrain pth/efficientnetb1.pth',
help='initial weights2 path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
parser.add_argument('--num-model', default='B1-nol', help='B0-B7')
opt = parser.parse_args()
main(opt)
| 14,720 | 46.640777 | 240 | py |
HIWL | HIWL-main/scheme/train_googlenet.py | import os
import math
import argparse
import sys
import copy
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_googlenet import GoogLeNet as create_model
from my_dataset import MyDataSet, returnDataset
from utils import read_split_data, train_googlenet_one_epoch, evaluateall, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/googlenet', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
num_model='googlenet'
print(args)
print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/')
tb_writer2 = SummaryWriter('classes2/{}'.format(num_model))
tb_writer4 = SummaryWriter('classes4/{}'.format(num_model))
if os.path.exists("./weights/googlenet") is False:
os.makedirs("./weights/googlenet")
if os.path.exists("./log") is False:
os.makedirs("./log")
if os.path.exists("./predicts/googlenet") is False:
os.makedirs("./predicts/googlenet")
sys.stdout = Logger(stream=sys.stdout)
    # class labels 0,1,2,3,4 correspond to: intermediate, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25 )),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5)
    # weighted sampling: weight each sample by the inverse frequency of its class,
    # so WeightedRandomSampler draws a roughly class-balanced stream
target = train_dataset2.images_class
class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))
    # just hand the sampler to the DataLoader below
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
print('Using {} dataloader workers every process'.format(nw))
val_loader5 = torch.utils.data.DataLoader(val_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
test_loader5 = torch.utils.data.DataLoader(test_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader4 = torch.utils.data.DataLoader(train_dataset4,
batch_size=batch_size,
pin_memory=True,
shuffle=True,
num_workers=nw)
val_loader4 = torch.utils.data.DataLoader(val_dataset4,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader2 = torch.utils.data.DataLoader(train_dataset2,
batch_size=batch_size,
pin_memory=False,
shuffle=False,
sampler=sampler,
num_workers=nw)
val_loader2 = torch.utils.data.DataLoader(val_dataset2,
batch_size=batch_size,
shuffle=False,
pin_memory=False,
num_workers=nw)
    # load pretrained weights if a checkpoint path was given
model4 = create_model(num_classes=args.num_classes4).to(device)
model2 = create_model(num_classes=args.num_classes2).to(device)
    tags = ["loss", "accuracy", "learning_rate"]  # metrics logged to TensorBoard
    # load weights for the 4-class model
if args.weights4 != "":
if os.path.exists(args.weights4):
weights4_dict = torch.load(args.weights4, map_location=device)
load_weights4_dict = {k: v for k, v in weights4_dict.items()
if model4.state_dict()[k].numel() == v.numel()}
print(model4.load_state_dict(load_weights4_dict, strict=False))
else:
raise FileNotFoundError("not found weights4 file: {}".format(args.weights4))
pg4 = [p for p in model4.parameters() if p.requires_grad]
# optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4)
optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4)
scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0)
# lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine
# scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4)
    # load weights for the 2-class model
if args.weights2 != "":
if os.path.exists(args.weights2):
weights2_dict = torch.load(args.weights2, map_location=device)
load_weights2_dict = {k: v for k, v in weights2_dict.items()
if model2.state_dict()[k].numel() == v.numel()}
print(model2.load_state_dict(load_weights2_dict, strict=False))
else:
raise FileNotFoundError("not found weights2 file: {}".format(args.weights2))
pg2 = [p for p in model2.parameters() if p.requires_grad]
# optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4)
optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4)
scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0)
# lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine
# scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2)
    best_acc_4 = [0, 0, 0] # top-3 accuracies
best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)]
best_acc_2 = [0, 0, 0]
best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)]
acc_combine_best = 0
acc_combine_best_index = 0
    patience = 25 # stop early if the validation loss does not improve for 25 epochs
early_stopping4 = EarlyStopping(patience , verbose=True)
early_stopping2 = EarlyStopping(patience , verbose=True)
for epoch in range(1000):
mean_loss4 = train_googlenet_one_epoch('model4', model=model4,
optimizer=optimizer4,
data_loader=train_loader4,
device=device,
epoch=epoch)
scheduler4.step()
# validate
acc_4, val_loss_4 = evaluate(model=model4,
data_loader=val_loader4,
device=device)
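        # Maintain a descending top-3 list of validation accuracies: slot the new value
        # into position 0/1/2 and shift the previous entries (and their model snapshots
        # and checkpoints) down one place. Ties with an existing entry are ignored.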
if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]:
best_acc_4[2] = acc_4
best_model4[2] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model))
elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = acc_4
best_model4[1] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model))
elif acc_4 > best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = best_acc_4[0]
best_model4[1] = best_model4[0]
best_acc_4[0] = acc_4
best_model4[0] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model))
tb_writer4.add_scalar(tags[0], mean_loss4, epoch)
tb_writer4.add_scalar(tags[1], acc_4, epoch)
tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch)
print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0]))
early_stopping4(val_loss_4, acc_4, model4)
if early_stopping4.early_stop:
print("epoch = {}".format(epoch))
break
for epoch in range(1000):
mean_loss2 = train_googlenet_one_epoch('model2', model=model2,
optimizer=optimizer2,
data_loader=train_loader2,
device=device,
epoch=epoch)
scheduler2.step()
acc_2, val_loss_2 = evaluate(model=model2,
data_loader=val_loader2,
device=device)
if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]:
best_acc_2[2] = acc_2
best_model2[2] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model))
elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = acc_2
best_model2[1] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model))
elif acc_2 > best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = best_acc_2[0]
best_model2[1] = best_model2[0]
best_acc_2[0] = acc_2
best_model2[0] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model))
tb_writer2.add_scalar(tags[0], mean_loss2, epoch)
tb_writer2.add_scalar(tags[1], acc_2, epoch)
tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch)
print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0]))
early_stopping2(val_loss_2, acc_2, model2)
if early_stopping2.early_stop:
print("epoch = {}".format(epoch))
break
    # evaluate the combined hierarchical classifier: try every pairing of the top-3 two-class and top-3 four-class checkpoints on the validation set
for i in range(len(best_model2)):
for j in range(len(best_model4)):
acc_combine, pred_all = evaluateall(
model2=best_model2[i],
model4=best_model4[j],
test_loader5=val_loader5,
device=device)
torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j))
if acc_combine_best < acc_combine:
acc_combine_best = acc_combine
acc_combine_best_index = (i, j)
test_acc, test_pred_all = evaluateall(
model2=best_model2[acc_combine_best_index[0]],
model4=best_model4[acc_combine_best_index[1]],
test_loader5=test_loader5,
device=device)
torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model))
print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path5', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--batch-size', type=int, default=24)
parser.add_argument('--num_classes4', type=int, default=4)
parser.add_argument('--epochs4', type=int, default=100)
parser.add_argument('--lr4', type=float, default=0.01)
parser.add_argument('--lrf4', type=float, default=0.01)
parser.add_argument('--weights4', type=str, default='',
help='initial weights4 path')
parser.add_argument('--num_classes2', type=int, default=2)
parser.add_argument('--epochs2', type=int, default=150)
parser.add_argument('--lr2', type=float, default=0.01)
parser.add_argument('--lrf2', type=float, default=0.01)
parser.add_argument('--weights2', type=str, default='',
help='initial weights2 path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 14,050 | 47.619377 | 240 | py |
HIWL | HIWL-main/scheme/train_vgg.py | # coding=UTF-8
import os
import math
import argparse
import sys
import copy
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_vgg import vgg as create_model
from my_dataset import MyDataSet, returnDataset
from utils import read_split_data, train_one_epoch, evaluateall, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/vgg', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
num_model='vgg'
print(args)
print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/')
tb_writer2 = SummaryWriter('classes2/{}'.format(num_model))
tb_writer4 = SummaryWriter('classes4/{}'.format(num_model))
if os.path.exists("./weights/vgg") is False:
os.makedirs("./weights/vgg")
if os.path.exists("./log") is False:
os.makedirs("./log")
if os.path.exists("./predicts/vgg") is False:
os.makedirs("./predicts/vgg")
sys.stdout = Logger(stream=sys.stdout)
    # 0,1,2,3,4 correspond to the classes: intermediate, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25 )),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5)
    # weighted sampling: define a sampling weight for each class
target = train_dataset2.images_class
class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))
    # just pass the sampler to the DataLoader
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
val_loader5 = torch.utils.data.DataLoader(val_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
test_loader5 = torch.utils.data.DataLoader(test_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader4 = torch.utils.data.DataLoader(train_dataset4,
batch_size=batch_size,
pin_memory=True,
shuffle=True,
num_workers=nw)
val_loader4 = torch.utils.data.DataLoader(val_dataset4,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader2 = torch.utils.data.DataLoader(train_dataset2,
batch_size=batch_size,
pin_memory=False,
shuffle=False,
sampler=sampler,
num_workers=nw)
val_loader2 = torch.utils.data.DataLoader(val_dataset2,
batch_size=batch_size,
shuffle=False,
pin_memory=False,
num_workers=nw)
    # load pre-trained weights if they exist
model4 = create_model(num_classes=args.num_classes4).to(device)
model2 = create_model(num_classes=args.num_classes2).to(device)
tags = ["loss", "accuracy", "learning_rate"]#需要画图的指标
#加载权重4
if args.weights4 != "":
if os.path.exists(args.weights4):
weights4_dict = torch.load(args.weights4, map_location=device)
load_weights4_dict = {k: v for k, v in weights4_dict.items()
if model4.state_dict()[k].numel() == v.numel()}
print(model4.load_state_dict(load_weights4_dict, strict=False))
else:
raise FileNotFoundError("not found weights4 file: {}".format(args.weights4))
pg4 = [p for p in model4.parameters() if p.requires_grad]
# optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4)
optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4)
scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0)
# lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine
# scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4)
    # load weights for the 2-class model
if args.weights2 != "":
if os.path.exists(args.weights2):
weights2_dict = torch.load(args.weights2, map_location=device)
load_weights2_dict = {k: v for k, v in weights2_dict.items()
if model2.state_dict()[k].numel() == v.numel()}
print(model2.load_state_dict(load_weights2_dict, strict=False))
else:
raise FileNotFoundError("not found weights2 file: {}".format(args.weights2))
pg2 = [p for p in model2.parameters() if p.requires_grad]
# optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4)
optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4)
scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0)
# lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine
# scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2)
    best_acc_4 = [0, 0, 0] # top-3 accuracies
best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)]
best_acc_2 = [0, 0, 0]
best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)]
acc_combine_best = 0
acc_combine_best_index = 0
    patience = 25 # stop early if the validation loss does not improve for 25 epochs
early_stopping4 = EarlyStopping(patience , verbose=True)
early_stopping2 = EarlyStopping(patience , verbose=True)
for epoch in range(1000):
mean_loss4 = train_one_epoch('model4', model=model4,
optimizer=optimizer4,
data_loader=train_loader4,
device=device,
epoch=epoch)
scheduler4.step()
# validate
acc_4, val_loss_4 = evaluate(model=model4,
data_loader=val_loader4,
device=device)
if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]:
best_acc_4[2] = acc_4
best_model4[2] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model))
elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = acc_4
best_model4[1] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model))
elif acc_4 > best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = best_acc_4[0]
best_model4[1] = best_model4[0]
best_acc_4[0] = acc_4
best_model4[0] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model))
tb_writer4.add_scalar(tags[0], mean_loss4, epoch)
tb_writer4.add_scalar(tags[1], acc_4, epoch)
tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch)
print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0]))
early_stopping4(val_loss_4, acc_4, model4)
if early_stopping4.early_stop:
print("epoch = {}".format(epoch))
break
for epoch in range(1000):
mean_loss2 = train_one_epoch('model2', model=model2,
optimizer=optimizer2,
data_loader=train_loader2,
device=device,
epoch=epoch)
scheduler2.step()
acc_2, val_loss_2 = evaluate(model=model2,
data_loader=val_loader2,
device=device)
if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]:
best_acc_2[2] = acc_2
best_model2[2] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model))
elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = acc_2
best_model2[1] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model))
elif acc_2 > best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = best_acc_2[0]
best_model2[1] = best_model2[0]
best_acc_2[0] = acc_2
best_model2[0] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model))
tb_writer2.add_scalar(tags[0], mean_loss2, epoch)
tb_writer2.add_scalar(tags[1], acc_2, epoch)
tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch)
print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0]))
early_stopping2(val_loss_2, acc_2, model2)
if early_stopping2.early_stop:
print("epoch = {}".format(epoch))
break
    # evaluate the combined hierarchical classifier
for i in range(len(best_model2)):
for j in range(len(best_model4)):
acc_combine, pred_all = evaluateall(
model2=best_model2[i],
model4=best_model4[j],
test_loader5=val_loader5,
device=device)
torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j))
if acc_combine_best < acc_combine:
acc_combine_best = acc_combine
acc_combine_best_index = (i, j)
test_acc, test_pred_all = evaluateall(
model2=best_model2[acc_combine_best_index[0]],
model4=best_model4[acc_combine_best_index[1]],
test_loader5=test_loader5,
device=device)
torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model))
print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path5', type=str,
default="F:/dataSet/clean gzdata")
parser.add_argument('--batch-size', type=int, default=2)
parser.add_argument('--num_classes4', type=int, default=4)
parser.add_argument('--epochs4', type=int, default=100)
parser.add_argument('--lr4', type=float, default=0.01)
parser.add_argument('--lrf4', type=float, default=0.01)
parser.add_argument('--weights4', type=str, default=r'F:\pretrain pth\vgg16-397923af.pth',
help='initial weights4 path')
parser.add_argument('--num_classes2', type=int, default=2)
parser.add_argument('--epochs2', type=int, default=150)
parser.add_argument('--lr2', type=float, default=0.01)
parser.add_argument('--lrf2', type=float, default=0.01)
parser.add_argument('--weights2', type=str, default=r'F:\pretrain pth\vgg16-397923af.pth',
help='initial weights2 path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 14,055 | 47.468966 | 240 | py |
HIWL | HIWL-main/scheme/model_resnet26.py | import torch.nn as nn
import torch
# In the original TF code there is a BN before the global pooling; blocks that change depth apply BN-ReLU to the input before forming the shortcut, while same-depth shortcuts downsample the input directly (max pooling, kernel 1x1, stride s)
class BasicBlock(nn.Module):
def __init__(self, m, k=2, dropoutrate=0.2, istop : bool = False,isbottom : bool = False):
super(BasicBlock, self).__init__()
self.in_channel = m*k * 2
self.out_channel = m*k * 4
self.istop=istop
self.isbottom=isbottom
if self.istop:
self.in_channel = 64
self.conv1 = nn.Conv2d(in_channels=self.in_channel, out_channels=m*k, kernel_size=1,
stride=1, padding=0)
self.conv2 = nn.Conv2d(in_channels=m*k, out_channels=m*k, kernel_size=3,
stride=1, padding=1)
self.conv3 = nn.Conv2d(in_channels=m*k, out_channels=self.out_channel, kernel_size=1,
stride=1, padding=0)
self.conv4 = nn.Conv2d(in_channels=self.out_channel, out_channels=m*k, kernel_size=1,
stride=1, padding=0)
self.conv5 = nn.Conv2d(in_channels=m*k, out_channels=m*k, kernel_size=3,
stride=1, padding=1)
self.conv6 = nn.Conv2d(in_channels=m*k, out_channels=self.out_channel, kernel_size=1,
stride=2, padding=0)
self.convshortcut1= nn.Conv2d(in_channels=self.in_channel, out_channels=self.out_channel, kernel_size=1,#raise dimension
padding=0, stride=1)
self.convshortcut2 = nn.MaxPool2d(kernel_size=2,stride=2)#downsample
self.bninc = nn.BatchNorm2d(self.in_channel)
self.bnmk = nn.BatchNorm2d(m*k)
self.bnoutc = nn.BatchNorm2d(self.out_channel)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropoutrate)
if self.isbottom:
self.conv6 = nn.Conv2d(in_channels=m * k, out_channels=self.out_channel, kernel_size=1,
stride=1, padding=0)
def forward(self, x):
        # first residual sub-block
# identity1 = self.bninc(x)
out = self.bninc(x)
out = self.relu(out)
identity1 = out
out = self.conv1(out)
out = self.bnmk(out)
out = self.relu(out)
out = self.conv2(out)
out = self.dropout(out)
out = self.bnmk(out)
out = self.relu(out)
out = self.conv3(out)
out += self.convshortcut1(identity1)
        # second residual sub-block
# identity2 = self.bnoutc(out)
identity2 = out
out = self.bnoutc(out)
out = self.relu(out)
out = self.conv4(out)
out = self.bnmk(out)
out = self.relu(out)
out = self.conv5(out)
out = self.dropout(out)
out = self.bnmk(out)
out = self.relu(out)
out = self.conv6(out)
if self.isbottom:
out += identity2
out = self.bnoutc(out)
else:
out += self.convshortcut2(identity2)
return out
class ResNet26(nn.Module):
def __init__(self,
block,
mlist,
# mlist=[32, 64, 128, 256],
k,
dropoutrate,
num_classes
):
super(ResNet26, self).__init__()
self.pad=nn.ZeroPad2d(padding=(2, 3, 2, 3))
self.conv1x = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=6, stride=1)
self.maxpool = nn.MaxPool2d(kernel_size=1, stride=2)
self.conv2to5x = self._make_layer(block, mlist, k, dropoutrate)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(mlist[-1]*k*4, num_classes)
self.fc = nn.Sequential(
# nn.Dropout(p=0.3),
nn.Conv2d(in_channels=mlist[-1]*k*4,out_channels=num_classes, kernel_size=1)
)
def forward(self, x):
out = self.pad(x)
out = self.conv1x(out)
out = self.maxpool(out)
out = self.conv2to5x(out)
out = self.avgpool(out)
# out = torch.flatten(out, start_dim=1)
out = self.fc(out)
out = torch.flatten(out, start_dim=1, end_dim=3)
return out
def _make_layer(self, block, mlist, k, dropoutrate):
layers = []
for i in range(len(mlist)):
if i == 0:
layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop= True, isbottom=False))
elif (i == len(mlist)-1):
layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop=False, isbottom=True))
else:
layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop=False, isbottom=False))
return nn.Sequential(*layers)
def resnet26(block=BasicBlock, mlist=[64, 128, 256, 512], k=2, dropoutrate=0.33, num_classes=5):
return ResNet26(block=block, mlist=mlist, k=k, dropoutrate=dropoutrate, num_classes=num_classes)
# from torchsummary import summary
# summary(resnet26().cuda(),(3,64,64))
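if __name__ == "__main__":
    # Quick shape check (illustrative sketch, mirroring the commented-out summary call above):
    x = torch.randn(2, 3, 64, 64)
    print(resnet26(num_classes=5)(x).shape) # expected: torch.Size([2, 5])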
| 4,981 | 37.921875 | 128 | py |
HIWL | HIWL-main/scheme/model_efficientnet.py | import math
import copy
from functools import partial
from collections import OrderedDict
from typing import Optional, Callable
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import functional as F
def _make_divisible(ch, divisor=8, min_ch=None):
if min_ch is None:
min_ch = divisor
new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_ch < 0.9 * ch:
new_ch += divisor
return new_ch
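# Worked example (illustrative values): _make_divisible(32 * 1.4) == 48, while
# _make_divisible(11) == 16 because rounding 11 down to 8 would drop more than
# 10% of the requested channels, so the guard bumps it up by one divisor.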
class ConvBNActivation(nn.Sequential):
def __init__(self,
in_planes: int,
out_planes: int,
kernel_size: int = 3,
stride: int = 1,
groups: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation_layer: Optional[Callable[..., nn.Module]] = None):
padding = (kernel_size - 1) // 2
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if activation_layer is None:
activation_layer = nn.SiLU # alias Swish (torch>=1.7)
super(ConvBNActivation, self).__init__(nn.Conv2d(in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False),
norm_layer(out_planes),
activation_layer())
class SqueezeExcitation(nn.Module):
def __init__(self,
input_c: int, # block input channel
expand_c: int, # block expand channel
squeeze_factor: int = 4):
super(SqueezeExcitation, self).__init__()
squeeze_c = input_c // squeeze_factor
self.fc1 = nn.Conv2d(expand_c, squeeze_c, 1)
self.ac1 = nn.SiLU() # alias Swish
self.fc2 = nn.Conv2d(squeeze_c, expand_c, 1)
self.ac2 = nn.Sigmoid()
def forward(self, x: Tensor) -> Tensor:
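        # Squeeze (global average pool), excite (two 1x1-conv FC layers with a SiLU
        # bottleneck and sigmoid gate), then rescale the input channel-wise.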
scale = F.adaptive_avg_pool2d(x, output_size=(1, 1))
scale = self.fc1(scale)
scale = self.ac1(scale)
scale = self.fc2(scale)
scale = self.ac2(scale)
return scale * x
class InvertedResidualConfig:
# kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate
def __init__(self,
kernel: int, # 3 or 5
input_c: int,
out_c: int,
expanded_ratio: int, # 1 or 6
stride: int, # 1 or 2
use_se: bool, # True
drop_rate: float,
index: str, # 1a, 2a, 2b, ...
width_coefficient: float):
self.input_c = self.adjust_channels(input_c, width_coefficient)
self.kernel = kernel
self.expanded_c = self.input_c * expanded_ratio
self.out_c = self.adjust_channels(out_c, width_coefficient)
self.use_se = use_se
self.stride = stride
self.drop_rate = drop_rate
self.index = index
@staticmethod
def adjust_channels(channels: int, width_coefficient: float):
return _make_divisible(channels * width_coefficient, 8)
class InvertedResidual(nn.Module):
def __init__(self,
cnf: InvertedResidualConfig,
norm_layer: Callable[..., nn.Module]):
super(InvertedResidual, self).__init__()
if cnf.stride not in [1, 2]:
raise ValueError("illegal stride value.")
self.use_res_connect = (cnf.stride == 1 and cnf.input_c == cnf.out_c)
layers = OrderedDict()
activation_layer = nn.SiLU # alias Swish
# expand
if cnf.expanded_c != cnf.input_c:
layers.update({"expand_conv": ConvBNActivation(cnf.input_c,
cnf.expanded_c,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=activation_layer)})
# depthwise
layers.update({"dwconv": ConvBNActivation(cnf.expanded_c,
cnf.expanded_c,
kernel_size=cnf.kernel,
stride=cnf.stride,
groups=cnf.expanded_c,
norm_layer=norm_layer,
activation_layer=activation_layer)})
if cnf.use_se:
layers.update({"se": SqueezeExcitation(cnf.input_c,
cnf.expanded_c)})
# project
layers.update({"project_conv": ConvBNActivation(cnf.expanded_c,
cnf.out_c,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=nn.Identity)})
self.block = nn.Sequential(layers)
self.out_channels = cnf.out_c
self.is_strided = cnf.stride > 1
        # only apply dropout when the shortcut (residual) connection is used
if self.use_res_connect and cnf.drop_rate > 0:
self.dropout = nn.Dropout2d(p=cnf.drop_rate, inplace=True)
else:
self.dropout = nn.Identity()
def forward(self, x: Tensor) -> Tensor:
result = self.block(x)
result = self.dropout(result)
if self.use_res_connect:
result += x
return result
class EfficientNet(nn.Module):
def __init__(self,
width_coefficient: float,
depth_coefficient: float,
num_classes: int = 1000,
dropout_rate: float = 0.2,
drop_connect_rate: float = 0.2,
block: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
):
super(EfficientNet, self).__init__()
# kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate, repeats
default_cnf = [[3, 32, 16, 1, 1, True, drop_connect_rate, 1],
[3, 16, 24, 6, 2, True, drop_connect_rate, 2],
[5, 24, 40, 6, 2, True, drop_connect_rate, 2],
[3, 40, 80, 6, 2, True, drop_connect_rate, 3],
[5, 80, 112, 6, 1, True, drop_connect_rate, 3],
[5, 112, 192, 6, 2, True, drop_connect_rate, 4],
[3, 192, 320, 6, 1, True, drop_connect_rate, 1]]
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
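        # e.g. for B1 (depth_coefficient=1.1): round_repeats(2) == ceil(2.2) == 3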
if block is None:
block = InvertedResidual
if norm_layer is None:
norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.1)
adjust_channels = partial(InvertedResidualConfig.adjust_channels,
width_coefficient=width_coefficient)
# build inverted_residual_setting
bneck_conf = partial(InvertedResidualConfig,
width_coefficient=width_coefficient)
b = 0
num_blocks = float(sum(round_repeats(i[-1]) for i in default_cnf))
inverted_residual_setting = []
for stage, args in enumerate(default_cnf):
cnf = copy.copy(args)
for i in range(round_repeats(cnf.pop(-1))):
if i > 0:
# strides equal 1 except first cnf
cnf[-3] = 1 # strides
cnf[1] = cnf[2] # input_channel equal output_channel
                cnf[-1] *= b / num_blocks # scale the drop-connect rate linearly with block index (stochastic depth)
index = str(stage + 1) + chr(i + 97) # 1a, 2a, 2b, ...
inverted_residual_setting.append(bneck_conf(*cnf, index))
b += 1
# create layers
layers = OrderedDict()
# first conv
layers.update({"stem_conv": ConvBNActivation(in_planes=3,
out_planes=adjust_channels(32),
kernel_size=3,
stride=2,
norm_layer=norm_layer)})
# building inverted residual blocks
for cnf in inverted_residual_setting:
layers.update({cnf.index: block(cnf, norm_layer)})
# build top
last_conv_input_c = inverted_residual_setting[-1].out_c
last_conv_output_c = adjust_channels(1280)
layers.update({"top": ConvBNActivation(in_planes=last_conv_input_c,
out_planes=last_conv_output_c,
kernel_size=1,
norm_layer=norm_layer)})
self.features = nn.Sequential(layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)
classifier = []
if dropout_rate > 0:
classifier.append(nn.Dropout(p=dropout_rate, inplace=True))
classifier.append(nn.Linear(last_conv_output_c, num_classes))
self.classifier = nn.Sequential(*classifier)
# initial weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def efficientnet_b0(num_classes=1000):
# input image size 224x224
return EfficientNet(width_coefficient=1.0,
depth_coefficient=1.0,
dropout_rate=0.2,
num_classes=num_classes)
def efficientnet_b1(num_classes=1000):
# input image size 240x240
return EfficientNet(width_coefficient=1.0,
depth_coefficient=1.1,
dropout_rate=0.2,
num_classes=num_classes)
def efficientnet_b2(num_classes=1000):
# input image size 260x260
return EfficientNet(width_coefficient=1.1,
depth_coefficient=1.2,
dropout_rate=0.3,
num_classes=num_classes)
def efficientnet_b3(num_classes=1000):
# input image size 300x300
return EfficientNet(width_coefficient=1.2,
depth_coefficient=1.4,
dropout_rate=0.3,
num_classes=num_classes)
def efficientnet_b4(num_classes=1000):
# input image size 380x380
return EfficientNet(width_coefficient=1.4,
depth_coefficient=1.8,
dropout_rate=0.4,
num_classes=num_classes)
def efficientnet_b5(num_classes=1000):
# input image size 456x456
return EfficientNet(width_coefficient=1.6,
depth_coefficient=2.2,
dropout_rate=0.4,
num_classes=num_classes)
def efficientnet_b6(num_classes=1000):
# input image size 528x528
return EfficientNet(width_coefficient=1.8,
depth_coefficient=2.6,
dropout_rate=0.5,
num_classes=num_classes)
def efficientnet_b7(num_classes=1000):
# input image size 600x600
return EfficientNet(width_coefficient=2.0,
depth_coefficient=3.1,
dropout_rate=0.5,
num_classes=num_classes)
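if __name__ == "__main__":
    # Smoke test (sketch): B0 at its native 224x224 resolution.
    x = torch.randn(2, 3, 224, 224)
    print(efficientnet_b0(num_classes=5)(x).shape) # expected: torch.Size([2, 5])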
| 12,752 | 37.645455 | 102 | py |
HIWL | HIWL-main/scheme/model_dieleman.py | import torch.nn as nn
import torch
class Dieleman(nn.Module):
def __init__(self, num_classes=1000):
super(Dieleman, self).__init__()
self.features = nn.Sequential(
            # shape comments assume the 45x45 inputs produced by train_dieleman.py
            nn.Conv2d(3, 32, kernel_size=6, bias=True), # input[3, 45, 45] output[32, 40, 40]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2), # output[32, 20, 20]
            nn.Conv2d(32, 64, kernel_size=5, bias=True), # output[64, 16, 16]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2), # output[64, 8, 8]
            nn.Conv2d(64, 128, kernel_size=3, bias=True), # output[128, 6, 6]
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, bias=True), # output[128, 4, 4]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2), # output[128, 2, 2]
)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(2 * 2 * 128, 2048, bias=True),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(2048, 2048, bias=True),
nn.ReLU(inplace=True),
nn.Linear(2048, num_classes ,bias=True),
)
# if init_weights:
# self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = torch.flatten(x, start_dim=1)
x = self.classifier(x)
return x
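if __name__ == "__main__":
    # Smoke test (sketch): train_dieleman.py resizes inputs to 45x45, which flattens
    # to 2*2*128 features before the classifier, matching the Linear layer above.
    x = torch.randn(2, 3, 45, 45)
    print(Dieleman(num_classes=5)(x).shape) # expected: torch.Size([2, 5])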
| 1,519 | 37.974359 | 97 | py |
HIWL | HIWL-main/scheme/train_efficientnet.py | import os
import argparse
import sys
import copy
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_efficientnet import efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3
from my_dataset import MyDataSet, returnDataset
from utils import read_split_data, train_one_epoch, evaluateall, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/b1', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
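    # Native EfficientNet input resolutions for B0-B7; resolution grows with the
    # compound-scaling coefficient alongside width and depth.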
img_size = {"B0": 224,
"B1": 240,
"B2": 260,
"B3": 300,
"B4": 380,
"B5": 456,
"B6": 528,
"B7": 600}
num_model = args.num_model
print(args)
print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/')
tb_writer2 = SummaryWriter('classes2/{}'.format(num_model))
tb_writer4 = SummaryWriter('classes4/{}'.format(num_model))
if num_model == 'B0':
create_model = efficientnet_b0
if num_model == 'B1':
create_model = efficientnet_b1
if num_model == 'B2':
create_model = efficientnet_b2
if num_model == 'B3':
create_model = efficientnet_b3
if os.path.exists("./weights/{}".format(num_model)) is False:
os.makedirs("./weights/{}".format(num_model))
if os.path.exists("./log") is False:
os.makedirs("./log")
if os.path.exists("./predicts/{}".format(num_model)) is False:
os.makedirs("./predicts/{}".format(num_model))
sys.stdout = Logger(filename='./log/efficientnet-{}'.format(num_model), stream=sys.stdout)
    # 0,1,2,3,4 correspond to the classes: intermediate, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25 )),
transforms.RandomResizedCrop(img_size[num_model], scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(img_size[num_model]),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5)
    # weighted sampling: define a sampling weight for each class
target = train_dataset2.images_class
class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))
    # just pass the sampler to the DataLoader
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
val_loader5 = torch.utils.data.DataLoader(val_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
test_loader5 = torch.utils.data.DataLoader(test_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader4 = torch.utils.data.DataLoader(train_dataset4,
batch_size=batch_size,
pin_memory=True,
shuffle=True,
num_workers=nw)
val_loader4 = torch.utils.data.DataLoader(val_dataset4,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader2 = torch.utils.data.DataLoader(train_dataset2,
batch_size=batch_size,
pin_memory=False,
shuffle=False,
sampler=sampler,
num_workers=nw)
val_loader2 = torch.utils.data.DataLoader(val_dataset2,
batch_size=batch_size,
shuffle=False,
pin_memory=False,
num_workers=nw)
    # load pre-trained weights if they exist
model4 = create_model(num_classes=args.num_classes4).to(device)
model2 = create_model(num_classes=args.num_classes2).to(device)
tags = ["train_loss", "val_loss", "accuracy", "learning_rate"]#需要画图的指标
#加载权重4
if args.weights4 != "":
if os.path.exists(args.weights4):
weights4_dict = torch.load(args.weights4, map_location=device)
load_weights4_dict = {k: v for k, v in weights4_dict.items()
if model4.state_dict()[k].numel() == v.numel()}
print(model4.load_state_dict(load_weights4_dict, strict=False))
else:
raise FileNotFoundError("not found weights4 file: {}".format(args.weights4))
pg4 = [p for p in model4.parameters() if p.requires_grad]
# optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4)
optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4)
scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0)
# lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine
# scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4)
    # load weights for the 2-class model
if args.weights2 != "":
if os.path.exists(args.weights2):
weights2_dict = torch.load(args.weights2, map_location=device)
load_weights2_dict = {k: v for k, v in weights2_dict.items()
if model2.state_dict()[k].numel() == v.numel()}
print(model2.load_state_dict(load_weights2_dict, strict=False))
else:
raise FileNotFoundError("not found weights2 file: {}".format(args.weights2))
pg2 = [p for p in model2.parameters() if p.requires_grad]
# optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4)
optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4)
scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0)
# lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine
# scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2)
    best_acc_4 = [0, 0, 0] # top-3 accuracies
best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)]
best_acc_2 = [0, 0, 0]
best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)]
acc_combine_best = 0
acc_combine_best_index = 0
    patience = 25 # stop early if the validation loss does not improve for 25 epochs
early_stopping4 = EarlyStopping(patience, verbose=True)
early_stopping2 = EarlyStopping(patience, verbose=True)
for epoch in range(1000):
mean_loss4 = train_one_epoch('model4', model=model4,
optimizer=optimizer4,
data_loader=train_loader4,
device=device,
epoch=epoch)
scheduler4.step()
# validate
acc_4, val_loss_4 = evaluate(model=model4,
data_loader=val_loader4,
device=device)
if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]:
best_acc_4[2] = acc_4
best_model4[2] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model))
elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = acc_4
best_model4[1] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model))
elif acc_4 > best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = best_acc_4[0]
best_model4[1] = best_model4[0]
best_acc_4[0] = acc_4
best_model4[0] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model))
tb_writer4.add_scalar(tags[0], mean_loss4, epoch)
tb_writer4.add_scalar(tags[1], val_loss_4, epoch)
tb_writer4.add_scalar(tags[2], acc_4, epoch)
tb_writer4.add_scalar(tags[3], optimizer4.param_groups[0]["lr"], epoch)
print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0]))
        early_stopping4(mean_loss4, acc_4, model4) # or val_loss_4
if early_stopping4.early_stop:
print("epoch = {}".format(epoch))
break
for epoch in range(1000):
mean_loss2 = train_one_epoch('model2', model=model2,
optimizer=optimizer2,
data_loader=train_loader2,
device=device,
epoch=epoch)
scheduler2.step()
acc_2, val_loss_2 = evaluate(model=model2,
data_loader=val_loader2,
device=device)
if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]:
best_acc_2[2] = acc_2
best_model2[2] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model))
elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = acc_2
best_model2[1] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model))
elif acc_2 > best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = best_acc_2[0]
best_model2[1] = best_model2[0]
best_acc_2[0] = acc_2
best_model2[0] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model))
tb_writer2.add_scalar(tags[0], mean_loss2, epoch)
tb_writer2.add_scalar(tags[1], val_loss_2, epoch)
tb_writer2.add_scalar(tags[2], acc_2, epoch)
tb_writer2.add_scalar(tags[3], optimizer2.param_groups[0]["lr"], epoch)
print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0]))
        early_stopping2(mean_loss2, acc_2, model2) # or val_loss_2
if early_stopping2.early_stop:
print("epoch = {}".format(epoch))
break
    # evaluate the combined hierarchical classifier
for i in range(len(best_model2)):
for j in range(len(best_model4)):
acc_combine, pred_all = evaluateall(
model2=best_model2[i],
model4=best_model4[j],
test_loader5=val_loader5,
device=device)
torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j))
if acc_combine_best < acc_combine:
acc_combine_best = acc_combine
acc_combine_best_index = (i, j)
test_acc, test_pred_all = evaluateall(
model2=best_model2[acc_combine_best_index[0]],
model4=best_model4[acc_combine_best_index[1]],
test_loader5=test_loader5,
device=device)
torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model))
print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path5', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--num_classes4', type=int, default=4)
parser.add_argument('--epochs4', type=int, default=100)
parser.add_argument('--lr4', type=float, default=0.005)
parser.add_argument('--lrf4', type=float, default=0.05)
parser.add_argument('--weights4', type=str, default='F:/pretrain pth/efficientnetb1.pth',
help='initial weights4 path')
parser.add_argument('--num_classes2', type=int, default=2)
parser.add_argument('--epochs2', type=int, default=150)
parser.add_argument('--lr2', type=float, default=0.005)
parser.add_argument('--lrf2', type=float, default=0.01)
parser.add_argument('--weights2', type=str, default='F:/pretrain pth/efficientnetb1.pth',
help='initial weights2 path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
parser.add_argument('--num-model', default='B1', help='B0-B7')
opt = parser.parse_args()
main(opt)
| 14,958 | 46.640127 | 240 | py |
HIWL | HIWL-main/scheme/train_dieleman.py | import os
import math
import argparse
import sys
import copy
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_dieleman import Dieleman as create_model
from my_dataset import MyDataSet, returnDataset
from utils import read_split_data, train_one_epoch, evaluateall, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/dieleman', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
num_model = 'dieleman'
print(args)
print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/')
tb_writer2 = SummaryWriter('classes2/{}'.format(num_model))
tb_writer4 = SummaryWriter('classes4/{}'.format(num_model))
if os.path.exists('./weights/{}'.format(num_model)) is False:
os.makedirs('./weights/{}'.format(num_model))
if os.path.exists('./log') is False:
os.makedirs('./log')
if os.path.exists('./predicts/{}'.format(num_model)) is False:
os.makedirs('./predicts/{}'.format(num_model))
sys.stdout = Logger(stream=sys.stdout)
    # 0,1,2,3,4 correspond to the classes: intermediate, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(
args.data_path5)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25 )),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.Resize((45, 45)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.Resize((45, 45)),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
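    # Note: inputs are resized to 45x45 because the Dieleman network's classifier
    # expects 2*2*128 flattened features (see model_dieleman.py).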
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(
data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5,
test_images_label5)
    # weighted sampling: define a sampling weight for each class
target = train_dataset2.images_class
class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))
    # just pass the sampler to the DataLoader
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
val_loader5 = torch.utils.data.DataLoader(val_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
test_loader5 = torch.utils.data.DataLoader(test_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader4 = torch.utils.data.DataLoader(train_dataset4,
batch_size=batch_size,
pin_memory=True,
shuffle=True,
num_workers=nw)
val_loader4 = torch.utils.data.DataLoader(val_dataset4,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader2 = torch.utils.data.DataLoader(train_dataset2,
batch_size=batch_size,
pin_memory=False,
shuffle=False,
sampler=sampler,
num_workers=nw)
val_loader2 = torch.utils.data.DataLoader(val_dataset2,
batch_size=batch_size,
shuffle=False,
pin_memory=False,
num_workers=nw)
    # load pre-trained weights if they exist
model4 = create_model(num_classes=args.num_classes4).to(device)
model2 = create_model(num_classes=args.num_classes2).to(device)
tags = ["loss", "accuracy", "learning_rate"] # 需要画图的指标
# 加载权重4
if args.weights4 != "":
if os.path.exists(args.weights4):
weights4_dict = torch.load(args.weights4, map_location=device)
load_weights4_dict = {k: v for k, v in weights4_dict.items()
if model4.state_dict()[k].numel() == v.numel()}
print(model4.load_state_dict(load_weights4_dict, strict=False))
else:
raise FileNotFoundError("not found weights4 file: {}".format(args.weights4))
pg4 = [p for p in model4.parameters() if p.requires_grad]
optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4)
scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4, T_max=150, eta_min=0)
    # load weights for the 2-class model
if args.weights2 != "":
if os.path.exists(args.weights2):
weights2_dict = torch.load(args.weights2, map_location=device)
load_weights2_dict = {k: v for k, v in weights2_dict.items()
if model2.state_dict()[k].numel() == v.numel()}
print(model2.load_state_dict(load_weights2_dict, strict=False))
else:
raise FileNotFoundError("not found weights2 file: {}".format(args.weights2))
pg2 = [p for p in model2.parameters() if p.requires_grad]
optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4)
scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0)
    best_acc_4 = [0, 0, 0] # top-3 accuracies
best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)]
best_acc_2 = [0, 0, 0]
best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)]
acc_combine_best = 0
acc_combine_best_index = 0
    patience = 25 # stop early if the validation loss does not improve for 25 epochs
early_stopping4 = EarlyStopping(patience, verbose=True)
early_stopping2 = EarlyStopping(patience, verbose=True)
for epoch in range(1000):
mean_loss4 = train_one_epoch('model4', model=model4,
optimizer=optimizer4,
data_loader=train_loader4,
device=device,
epoch=epoch)
scheduler4.step()
# validate
acc_4, val_loss_4 = evaluate(model=model4,
data_loader=val_loader4,
device=device)
if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]:
best_acc_4[2] = acc_4
best_model4[2] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model))
elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = acc_4
best_model4[1] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model))
elif acc_4 > best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = best_acc_4[0]
best_model4[1] = best_model4[0]
best_acc_4[0] = acc_4
best_model4[0] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model))
tb_writer4.add_scalar(tags[0], mean_loss4, epoch)
tb_writer4.add_scalar(tags[1], acc_4, epoch)
tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch)
print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0]))
early_stopping4(val_loss_4, acc_4, model4)
if early_stopping4.early_stop:
print("epoch = {}".format(epoch))
break
for epoch in range(1000):
mean_loss2 = train_one_epoch('model2', model=model2,
optimizer=optimizer2,
data_loader=train_loader2,
device=device,
epoch=epoch)
scheduler2.step()
acc_2, val_loss_2 = evaluate(model=model2,
data_loader=val_loader2,
device=device)
if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]:
best_acc_2[2] = acc_2
best_model2[2] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model))
elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = acc_2
best_model2[1] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model))
elif acc_2 > best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = best_acc_2[0]
best_model2[1] = best_model2[0]
best_acc_2[0] = acc_2
best_model2[0] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model))
tb_writer2.add_scalar(tags[0], mean_loss2, epoch)
tb_writer2.add_scalar(tags[1], acc_2, epoch)
tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch)
print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0]))
early_stopping2(val_loss_2, acc_2, model2)
if early_stopping2.early_stop:
print("epoch = {}".format(epoch))
break
    # evaluate the combined hierarchical classifier
for i in range(len(best_model2)):
for j in range(len(best_model4)):
acc_combine, pred_all = evaluateall(
model2=best_model2[i],
model4=best_model4[j],
test_loader5=val_loader5,
device=device)
torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j))
if acc_combine_best < acc_combine:
acc_combine_best = acc_combine
acc_combine_best_index = (i, j)
test_acc, test_pred_all = evaluateall(
model2=best_model2[acc_combine_best_index[0]],
model4=best_model4[acc_combine_best_index[1]],
test_loader5=test_loader5,
device=device)
torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model))
print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best,
acc_combine_best_index, test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path5', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--num_classes4', type=int, default=4)
parser.add_argument('--epochs4', type=int, default=100)
parser.add_argument('--lr4', type=float, default=0.005)
parser.add_argument('--lrf4', type=float, default=0.05)
parser.add_argument('--weights4', type=str, default='',
help='initial weights4 path')
parser.add_argument('--num_classes2', type=int, default=2)
parser.add_argument('--epochs2', type=int, default=150)
parser.add_argument('--lr2', type=float, default=0.005)
parser.add_argument('--lrf2', type=float, default=0.01)
parser.add_argument('--weights2', type=str, default='',
help='initial weights2 path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 13,800 | 47.424561 | 138 | py |
HIWL | HIWL-main/scheme/train_vit.py | import os
import math
import argparse
import sys
import copy
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_vit import vit_base_patch16_224_in21k as create_model
from my_dataset import MyDataSet, returnDataset
from utils import read_split_data, train_one_epoch, evaluateall, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/vit', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
num_model='vit'
print(args)
print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/')
tb_writer2 = SummaryWriter('classes2/{}'.format(num_model))
tb_writer4 = SummaryWriter('classes4/{}'.format(num_model))
if os.path.exists("./weights/vit") is False:
os.makedirs("./weights/vit")
if os.path.exists("./log") is False:
os.makedirs("./log")
if os.path.exists("./predicts/vit") is False:
os.makedirs("./predicts/vit")
sys.stdout = Logger(stream=sys.stdout)
    # 0,1,2,3,4 correspond to the classes: intermediate, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25 )),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(256),
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
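    # Note: ViT-Base/16 expects a fixed 224x224 input, so the test pipeline center-crops
    # to 256 and then resizes down to 224 (presumably to keep the whole galaxy in frame).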
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5)
    # weighted sampling: define a sampling weight for each class
target = train_dataset2.images_class
class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight))
    # just pass the sampler to the DataLoader
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
val_loader5 = torch.utils.data.DataLoader(val_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
test_loader5 = torch.utils.data.DataLoader(test_dataset5,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader4 = torch.utils.data.DataLoader(train_dataset4,
batch_size=batch_size,
pin_memory=True,
shuffle=True,
num_workers=nw)
val_loader4 = torch.utils.data.DataLoader(val_dataset4,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw)
train_loader2 = torch.utils.data.DataLoader(train_dataset2,
batch_size=batch_size,
pin_memory=False,
shuffle=False,
sampler=sampler,
num_workers=nw)
val_loader2 = torch.utils.data.DataLoader(val_dataset2,
batch_size=batch_size,
shuffle=False,
pin_memory=False,
num_workers=nw)
    # load pre-trained weights if they exist
model4 = create_model(num_classes=args.num_classes4).to(device)
model2 = create_model(num_classes=args.num_classes2).to(device)
tags = ["loss", "accuracy", "learning_rate"]#需要画图的指标
#加载权重4
if args.weights4 != "":
if os.path.exists(args.weights4):
weights4_dict = torch.load(args.weights4, map_location=device)
load_weights4_dict = {k: v for k, v in weights4_dict.items()
if model4.state_dict()[k].numel() == v.numel()}
print(model4.load_state_dict(load_weights4_dict, strict=False))
else:
raise FileNotFoundError("not found weights4 file: {}".format(args.weights4))
pg4 = [p for p in model4.parameters() if p.requires_grad]
# optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4)
optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4)
scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0)
# lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine
# scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4)
    # load weights for the 2-class model
if args.weights2 != "":
if os.path.exists(args.weights2):
weights2_dict = torch.load(args.weights2, map_location=device)
load_weights2_dict = {k: v for k, v in weights2_dict.items()
if model2.state_dict()[k].numel() == v.numel()}
print(model2.load_state_dict(load_weights2_dict, strict=False))
else:
raise FileNotFoundError("not found weights2 file: {}".format(args.weights2))
pg2 = [p for p in model2.parameters() if p.requires_grad]
# optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4)
optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4)
scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0)
# lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine
# scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2)
    best_acc_4 = [0, 0, 0]  # top-3 validation accuracies for the 4-class model
best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)]
best_acc_2 = [0, 0, 0]
best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)]
acc_combine_best = 0
acc_combine_best_index = 0
    patience = 25  # stop if the validation metric does not improve for 25 epochs
early_stopping4 = EarlyStopping(patience, verbose=True)
early_stopping2 = EarlyStopping(patience, verbose=True)
for epoch in range(1000):
mean_loss4 = train_one_epoch('model4', model=model4,
optimizer=optimizer4,
data_loader=train_loader4,
device=device,
epoch=epoch)
scheduler4.step()
# validate
acc_4, val_loss_4 = evaluate(model=model4,
data_loader=val_loader4,
device=device)
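        # Keep the three best checkpoints by validation accuracy: slot 0 holds
        # the best model and slots 1-2 the runners-up; a new score shifts the
        # existing entries down before its state_dict is saved.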
if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]:
best_acc_4[2] = acc_4
best_model4[2] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model))
elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = acc_4
best_model4[1] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model))
elif acc_4 > best_acc_4[0]:
best_acc_4[2] = best_acc_4[1]
best_model4[2] = best_model4[1]
best_acc_4[1] = best_acc_4[0]
best_model4[1] = best_model4[0]
best_acc_4[0] = acc_4
best_model4[0] = copy.deepcopy(model4)
torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model))
tb_writer4.add_scalar(tags[0], mean_loss4, epoch)
tb_writer4.add_scalar(tags[1], acc_4, epoch)
tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch)
print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0]))
early_stopping4(val_loss_4, acc_4, model4)
if early_stopping4.early_stop:
print("epoch = {}".format(epoch))
break
for epoch in range(1000):
mean_loss2 = train_one_epoch('model2', model=model2,
optimizer=optimizer2,
data_loader=train_loader2,
device=device,
epoch=epoch)
scheduler2.step()
acc_2, val_loss_2 = evaluate(model=model2,
data_loader=val_loader2,
device=device)
if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]:
best_acc_2[2] = acc_2
best_model2[2] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model))
elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = acc_2
best_model2[1] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model))
elif acc_2 > best_acc_2[0]:
best_acc_2[2] = best_acc_2[1]
best_model2[2] = best_model2[1]
best_acc_2[1] = best_acc_2[0]
best_model2[1] = best_model2[0]
best_acc_2[0] = acc_2
best_model2[0] = copy.deepcopy(model2)
torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model))
tb_writer2.add_scalar(tags[0], mean_loss2, epoch)
tb_writer2.add_scalar(tags[1], acc_2, epoch)
tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch)
print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0]))
early_stopping2(val_loss_2, acc_2, model2)
if early_stopping2.early_stop:
print("epoch = {}".format(epoch))
break
    # Evaluate the combined hierarchical model
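    # Selection sketch: every pairing of the top-3 two-class and top-3
    # four-class checkpoints is scored on the 5-class validation set, and the
    # best pair is then evaluated once on the held-out test set below.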
for i in range(len(best_model2)):
for j in range(len(best_model4)):
acc_combine, pred_all = evaluateall(
model2=best_model2[i],
model4=best_model4[j],
test_loader5=val_loader5,
device=device)
torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j))
if acc_combine_best < acc_combine:
acc_combine_best = acc_combine
acc_combine_best_index = (i, j)
test_acc, test_pred_all = evaluateall(
model2=best_model2[acc_combine_best_index[0]],
model4=best_model4[acc_combine_best_index[1]],
test_loader5=test_loader5,
device=device)
torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model))
print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path5', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--batch-size', type=int, default=2)
parser.add_argument('--num_classes4', type=int, default=4)
parser.add_argument('--epochs4', type=int, default=100)
parser.add_argument('--lr4', type=float, default=0.005)
parser.add_argument('--lrf4', type=float, default=0.05)
parser.add_argument('--weights4', type=str, default=r"F:\pretrain pth\jx_vit_base_patch16_224_in21k-e5005f0a.pth",
help='initial weights4 path')
parser.add_argument('--num_classes2', type=int, default=2)
parser.add_argument('--epochs2', type=int, default=150)
parser.add_argument('--lr2', type=float, default=0.005)
parser.add_argument('--lrf2', type=float, default=0.01)
parser.add_argument('--weights2', type=str, default=r"F:\pretrain pth\jx_vit_base_patch16_224_in21k-e5005f0a.pth",
help='initial weights2 path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 14,169 | 48.372822 | 240 | py |
HIWL | HIWL-main/scheme/my_dataset.py | from PIL import Image
import torch
from torch.utils.data import Dataset
import numpy as np
import copy
class MyDataSet(Dataset):
"""自定义数据集"""
def __init__(self, images_path: list, images_class: list, transform=None):
self.images_path = images_path
self.images_class = images_class
self.transform = transform
def __len__(self):
return len(self.images_path)
def __getitem__(self, item):
img = Image.open(self.images_path[item])
        # mode 'RGB' means a color image, 'L' a grayscale one
if img.mode != 'RGB':
raise ValueError("image: {} isn't RGB mode.".format(self.images_path[item]))
label = self.images_class[item]
if self.transform is not None:
img = self.transform(img)
return img, label
@staticmethod
def collate_fn(batch):
images, labels = tuple(zip(*batch))
images = torch.stack(images, dim=0)
labels = torch.as_tensor(labels)
return images, labels
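# Usage sketch for MyDataSet.collate_fn (the noscheme training scripts pass it
# to their loaders like this):
# loader = torch.utils.data.DataLoader(dataset, batch_size=8,
#                                      collate_fn=MyDataSet.collate_fn)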
def returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5):
    # Label adjustment for hierarchical learning
    # After the two classes are merged into one, 4 classes remain ([0, 1, 3, 4]); remap the labels to [0, 1, 2, 3]
train_images_path4 = copy.deepcopy(train_images_path5)
train_images_label4 = copy.deepcopy(train_images_label5)
val_images_path4 = copy.deepcopy(val_images_path5)
val_images_label4 = copy.deepcopy(val_images_label5)
    for i in range(len(train_images_label4)):  # remap labels: 2 -> 1, 3 -> 2, 4 -> 3
        if train_images_label4[i] == 2:
            train_images_label4[i] = 1
        elif train_images_label4[i] == 3:
            train_images_label4[i] = 2
        elif train_images_label4[i] == 4:
            train_images_label4[i] = 3
    for i in range(len(val_images_label4)):  # remap labels: 2 -> 1, 3 -> 2, 4 -> 3
        if val_images_label4[i] == 2:
            val_images_label4[i] = 1
        elif val_images_label4[i] == 3:
            val_images_label4[i] = 2
        elif val_images_label4[i] == 4:
            val_images_label4[i] = 3
    # Train/validation splits for the cigar and edge-on classes [1, 2]; shift the labels to [0, 1]
train_images_label2 = (
np.array(copy.deepcopy(train_images_label5))[
            (np.array(train_images_label5) == 1) | (np.array(train_images_label5) == 2)] - 1).tolist()  # subtract 1 from every label
val_images_label2 = (
np.array(copy.deepcopy(val_images_label5))[
            (np.array(val_images_label5) == 1) | (np.array(val_images_label5) == 2)] - 1).tolist()  # subtract 1 from every label
train_images_path2 = (
np.array(copy.deepcopy(train_images_path5))[
(np.array(train_images_label5) == 1) | (np.array(train_images_label5) == 2)]).tolist()
val_images_path2 = (
np.array(copy.deepcopy(val_images_path5))[
(np.array(val_images_label5) == 1) | (np.array(val_images_label5) == 2)]).tolist()
    # Instantiate the datasets
val_dataset5 = MyDataSet(images_path=val_images_path5,
images_class=val_images_label5,
transform=data_transform["test"])
test_dataset5 = MyDataSet(images_path=test_images_path5,
images_class=test_images_label5,
transform=data_transform["test"])
train_dataset4 = MyDataSet(images_path=train_images_path4,
images_class=train_images_label4,
transform=data_transform["train"])
val_dataset4 = MyDataSet(images_path=val_images_path4,
images_class=val_images_label4,
transform=data_transform["test"])
train_dataset2 = MyDataSet(images_path=train_images_path2,
images_class=train_images_label2,
transform=data_transform["train"])
val_dataset2 = MyDataSet(images_path=val_images_path2,
images_class=val_images_label2,
transform=data_transform["test"])
return val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 | 4,059 | 40.010101 | 151 | py |
HIWL | HIWL-main/noscheme/model_vit.py | """
original code from rwightman:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
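# Worked example (illustrative): with drop_prob=0.1 each sample's residual
# branch is zeroed with probability 0.1, and survivors are scaled by 1 / 0.9
# so the expected value of the output matches the input.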
class DropPath(nn.Module):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class PatchEmbed(nn.Module):
"""
2D Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
# flatten: [B, C, H, W] -> [B, C, HW]
# transpose: [B, C, HW] -> [B, HW, C]
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
return x
class Attention(nn.Module):
def __init__(self,
                 dim,  # input token dimension
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop_ratio=0.,
proj_drop_ratio=0.):
super(Attention, self).__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop_ratio)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop_ratio)
def forward(self, x):
# [batch_size, num_patches + 1, total_embed_dim]
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
# [batch_size, num_heads, num_patches + 1, embed_dim_per_head]
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
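        # self.scale defaults to head_dim ** -0.5, the usual 1/sqrt(d_k)
        # factor that keeps the dot-product logits at unit variance before
        # the softmax below.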
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Mlp(nn.Module):
"""
MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Block(nn.Module):
def __init__(self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_ratio=0.,
attn_drop_ratio=0.,
drop_path_ratio=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm):
super(Block, self).__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000,
embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True,
qk_scale=None, representation_size=None, distilled=False, drop_ratio=0.,
attn_drop_ratio=0., drop_path_ratio=0., embed_layer=PatchEmbed, norm_layer=None,
act_layer=None):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_c (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
distilled (bool): model includes a distillation token and head as in DeiT models
drop_ratio (float): dropout rate
attn_drop_ratio (float): attention dropout rate
drop_path_ratio (float): stochastic depth rate
embed_layer (nn.Module): patch embedding layer
norm_layer: (nn.Module): normalization layer
"""
super(VisionTransformer, self).__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_tokens = 2 if distilled else 1
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
self.pos_drop = nn.Dropout(p=drop_ratio)
dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)] # stochastic depth decay rule
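        # e.g. depth=12 with drop_path_ratio=0.1 yields per-block drop rates
        # linearly spaced from 0.0 (first block) to 0.1 (last block)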
self.blocks = nn.Sequential(*[
Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=dpr[i],
norm_layer=norm_layer, act_layer=act_layer)
for i in range(depth)
])
self.norm = norm_layer(embed_dim)
# Representation layer
if representation_size and not distilled:
self.has_logits = True
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
("fc", nn.Linear(embed_dim, representation_size)),
("act", nn.Tanh())
]))
else:
self.has_logits = False
self.pre_logits = nn.Identity()
# Classifier head(s)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
# Weight init
nn.init.trunc_normal_(self.pos_embed, std=0.02)
if self.dist_token is not None:
nn.init.trunc_normal_(self.dist_token, std=0.02)
nn.init.trunc_normal_(self.cls_token, std=0.02)
self.apply(_init_vit_weights)
def forward_features(self, x):
# [B, C, H, W] -> [B, num_patches, embed_dim]
x = self.patch_embed(x) # [B, 196, 768]
# [1, 1, 768] -> [B, 1, 768]
cls_token = self.cls_token.expand(x.shape[0], -1, -1)
if self.dist_token is None:
x = torch.cat((cls_token, x), dim=1) # [B, 197, 768]
else:
x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
x = self.pos_drop(x + self.pos_embed)
x = self.blocks(x)
x = self.norm(x)
if self.dist_token is None:
return self.pre_logits(x[:, 0])
else:
return x[:, 0], x[:, 1]
def forward(self, x):
x = self.forward_features(x)
if self.head_dist is not None:
x, x_dist = self.head(x[0]), self.head_dist(x[1])
            if self.training and not torch.jit.is_scripting():
                # during training, return both classifier predictions
                return x, x_dist
            else:
                # during inference, return the average of both classifier predictions
                return (x + x_dist) / 2
else:
x = self.head(x)
return x
def _init_vit_weights(m):
"""
ViT weight initialization
:param m: module
"""
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.zeros_(m.bias)
nn.init.ones_(m.weight)
def vit_base_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768 if has_logits else None,
num_classes=num_classes)
return model
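# Illustrative call (a sketch of how the training scripts build their models):
# model = vit_base_patch16_224_in21k(num_classes=5)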
def vit_base_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=32,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768 if has_logits else None,
num_classes=num_classes)
return model
def vit_large_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024 if has_logits else None,
num_classes=num_classes)
return model
def vit_large_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=32,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024 if has_logits else None,
num_classes=num_classes)
return model
def vit_huge_patch14_224_in21k(num_classes: int = 21843, has_logits: bool = True):
model = VisionTransformer(img_size=224,
patch_size=14,
embed_dim=1280,
depth=32,
num_heads=16,
representation_size=1280 if has_logits else None,
num_classes=num_classes)
return model
| 13,049 | 38.189189 | 118 | py |
HIWL | HIWL-main/noscheme/model_googlenet.py | import torch.nn as nn
import torch
import torch.nn.functional as F
class GoogLeNet(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, init_weights=False):
super(GoogLeNet, self).__init__()
self.aux_logits = aux_logits
self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = BasicConv2d(64, 64, kernel_size=1)
self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
if self.aux_logits:
self.aux1 = InceptionAux(512, num_classes)
self.aux2 = InceptionAux(528, num_classes)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(0.4)
self.fc = nn.Linear(1024, num_classes)
if init_weights:
self._initialize_weights()
def forward(self, x):
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
        if self.training and self.aux_logits:  # the aux classifiers are skipped in eval mode
aux1 = self.aux1(x)
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
        if self.training and self.aux_logits:  # the aux classifiers are skipped in eval mode
aux2 = self.aux2(x)
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
        if self.training and self.aux_logits:  # the aux classifiers are skipped in eval mode
return x, aux2, aux1
return x
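    # Note on the forward pass above: train_googlenet_one_epoch in utils.py
    # combines the three training outputs as loss0 + 0.3 * loss1 + 0.3 * loss2,
    # the standard GoogLeNet auxiliary-classifier weighting.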
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
class Inception(nn.Module):
def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
super(Inception, self).__init__()
self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
self.branch2 = nn.Sequential(
BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)  # padding keeps the output size equal to the input size
)
self.branch3 = nn.Sequential(
BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2)  # padding keeps the output size equal to the input size
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
BasicConv2d(in_channels, pool_proj, kernel_size=1)
)
def forward(self, x):
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
outputs = [branch1, branch2, branch3, branch4]
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.averagePool = nn.AvgPool2d(kernel_size=5, stride=3)
self.conv = BasicConv2d(in_channels, 128, kernel_size=1) # output[batch, 128, 4, 4]
self.fc1 = nn.Linear(2048, 1024)
self.fc2 = nn.Linear(1024, num_classes)
def forward(self, x):
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = self.averagePool(x)
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
x = F.dropout(x, 0.5, training=self.training)
# N x 2048
x = F.relu(self.fc1(x), inplace=True)
x = F.dropout(x, 0.5, training=self.training)
# N x 1024
x = self.fc2(x)
# N x num_classes
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
| 5,919 | 33.219653 | 92 | py |
HIWL | HIWL-main/noscheme/model_vgg.py | import torch.nn as nn
import torch
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=False):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512*7*7, 4096),
nn.ReLU(True),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(p=0.5),
nn.Linear(4096, num_classes)
)
if init_weights:
self._initialize_weights()
def forward(self, x):
# N x 3 x 224 x 224
x = self.features(x)
# N x 512 x 7 x 7
x = torch.flatten(x, start_dim=1)
# N x 512*7*7
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
# nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_features(cfg: list):
layers = []
in_channels = 3
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(True)]
in_channels = v
return nn.Sequential(*layers)
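# For example, cfgs['vgg16'] below expands to 13 conv3x3+ReLU layers
# interleaved with 5 max-pool stages, so a 224x224 input leaves make_features
# as the 512*7*7 tensor the classifier above expects.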
cfgs = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg(model_name="vgg16", **kwargs):
    assert model_name in cfgs, "Warning: model name {} not in cfgs dict!".format(model_name)
cfg = cfgs[model_name]
model = VGG(make_features(cfg), **kwargs)
return model
| 2,287 | 31.685714 | 117 | py |
HIWL | HIWL-main/noscheme/model_resnet.py | import torch.nn as nn
import torch
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channel)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channel)
self.downsample = downsample
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_channel, out_channel, stride=1, downsample=None,
groups=1, width_per_group=64):
super(Bottleneck, self).__init__()
width = int(out_channel * (width_per_group / 64.)) * groups
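        # e.g. ResNeXt-50 32x4d at the first stage: out_channel=64,
        # width_per_group=4, groups=32 -> width = int(64 * 4 / 64) * 32 = 128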
self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width,
kernel_size=1, stride=1, bias=False) # squeeze channels
self.bn1 = nn.BatchNorm2d(width)
# -----------------------------------------
self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, groups=groups,
kernel_size=3, stride=stride, bias=False, padding=1)
self.bn2 = nn.BatchNorm2d(width)
# -----------------------------------------
self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel*self.expansion,
kernel_size=1, stride=1, bias=False) # unsqueeze channels
self.bn3 = nn.BatchNorm2d(out_channel*self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
blocks_num,
num_classes=1000,
include_top=True,
groups=1,
width_per_group=64):
super(ResNet, self).__init__()
self.include_top = include_top
self.in_channel = 64
self.groups = groups
self.width_per_group = width_per_group
self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,
padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(self.in_channel)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, blocks_num[0])
self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)
self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)
self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)
if self.include_top:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # output size = (1, 1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def _make_layer(self, block, channel, block_num, stride=1):
downsample = None
if stride != 1 or self.in_channel != channel * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(channel * block.expansion))
layers = []
layers.append(block(self.in_channel,
channel,
downsample=downsample,
stride=stride,
groups=self.groups,
width_per_group=self.width_per_group))
self.in_channel = channel * block.expansion
for _ in range(1, block_num):
layers.append(block(self.in_channel,
channel,
groups=self.groups,
width_per_group=self.width_per_group))
return nn.Sequential(*layers)
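    # The 1x1-conv + BN downsample branch above is attached only to the first
    # block of a stage, whenever the stride or channel count changes, so the
    # identity shortcut can still be added to the residual output.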
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.include_top:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def resnet34(num_classes=1000, include_top=True):
return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
def resnet50(num_classes=1000, include_top=True):
return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
def resnet101(num_classes=1000, include_top=True):
return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top)
def resnext50_32x4d(num_classes=1000, include_top=True):
groups = 32
width_per_group = 4
return ResNet(Bottleneck, [3, 4, 6, 3],
num_classes=num_classes,
include_top=include_top,
groups=groups,
width_per_group=width_per_group)
def resnext101_32x8d(num_classes=1000, include_top=True):
groups = 32
width_per_group = 8
return ResNet(Bottleneck, [3, 4, 23, 3],
num_classes=num_classes,
include_top=include_top,
groups=groups,
width_per_group=width_per_group)
| 6,446 | 32.931579 | 112 | py |
HIWL | HIWL-main/noscheme/B1_noh.py | import os
import math
import argparse
import sys
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
from model_efficientnet import efficientnet_b1 as create_model
from my_dataset import MyDataSet
from utils import read_split_data, train_one_epoch, evaluate_noh
class Logger(object):
def __init__(self, filename='./log/effnoh', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
sys.stdout = Logger(stream=sys.stdout)
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
print(args)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter()
img_size = {"B0": 224,
"B1": 240,
"B2": 260,
"B3": 300,
"B4": 380,
"B5": 456,
"B6": 528,
"B7": 600}
num_model = "B1_noh"
if os.path.exists("./weights/{}".format(num_model)) is False:
os.makedirs("./weights/{}".format(num_model))
if os.path.exists("../scheme/log") is False:
os.makedirs("./log")
train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
                                     transforms.RandomRotation((-25, 25)),
transforms.RandomResizedCrop(img_size['B1'], scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(img_size['B1']),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
    # Instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # Instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["test"])
    # Instantiate the test dataset
test_data_set = MyDataSet(images_path=test_images_path,
images_class=test_images_label,
transform=data_transform["test"])
batch_size = args.batch_size
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
test_loader = torch.utils.data.DataLoader(test_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
    # Load pre-trained weights if they exist
model = create_model(num_classes=args.num_classes).to(device)
# model.load_state_dict(torch.load(args.weights, map_location=device))
if os.path.exists(args.weights):
weights_dict = torch.load(args.weights, map_location=device)
load_weights_dict = {k: v for k, v in weights_dict.items()
if model.state_dict()[k].numel() == v.numel()}
print(model.load_state_dict(load_weights_dict, strict=False))
pg = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine
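    # Cosine decay sketch: lf(0) = 1.0 (full lr) and lf(args.epochs) = args.lrf,
    # so with the defaults here the lr falls from 0.005 to 0.005 * 0.005 = 2.5e-5.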
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_val_acc = 0.0
    best_model = None
for epoch in range(args.epochs):
# train
mean_loss = train_one_epoch(model=model,
optimizer=optimizer,
data_loader=train_loader,
device=device,
epoch=epoch)
scheduler.step()
# validate
sum_num, pred_all = evaluate_noh(model=model,
data_loader=val_loader,
device=device)
val_acc = sum_num / len(val_data_set)
torch.save(model.state_dict(), "./weights/B1_noh/model-{}.pth".format(epoch))
if val_acc > best_val_acc:
best_model = model
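            # note: this binds a reference rather than a copy, so best_model
            # always tracks the live model; copy.deepcopy(model) (as the
            # scheme script uses) would snapshot the weights instead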
            best_val_acc = val_acc
torch.save(pred_all, 'b1_noh_val_811.pth')
print("[epoch {}] accuracy: {}".format(epoch, round(val_acc, 3)))
tags = ["loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], mean_loss, epoch)
tb_writer.add_scalar(tags[1], val_acc, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
# test
sum_num, pred_all = evaluate_noh(model=model,
data_loader=test_loader,
device=device)
test_acc = sum_num / len(test_data_set)
torch.save(pred_all, 'b1_noh_test_811.pth')
print('test_acc:{}'.format(test_acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str,
default=r"F:/dataSet/clean gzdata")
parser.add_argument('--num_classes', type=int, default=5)
parser.add_argument('--epochs', type=int, default=150)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--lr', type=float, default=0.005)
parser.add_argument('--lrf', type=float, default=0.005)
parser.add_argument('--weights', type=str, default=r'',
help='initial weights path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
opt = parser.parse_args()
main(opt)
| 7,347 | 42.47929 | 147 | py |
HIWL | HIWL-main/noscheme/train_resnet26.py | import os
import math
import argparse
import sys
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
from model_resnet26 import resnet26 as create_model
from my_dataset import MyDataSet
from utils import read_split_data, train_one_epoch, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/resnet26', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
num_model='resnet26'
print(args)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter()
if os.path.exists("./weights/resnet26") is False:
os.makedirs("./weights/resnet26")
if os.path.exists("./log") is False:
os.makedirs("./log")
sys.stdout = Logger(stream=sys.stdout)
    # Labels 0, 1, 2, 3, 4 correspond to: in-between, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(
args.data_path)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
                                     transforms.RandomRotation((-25, 25)),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.Resize((64, 64)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.Resize((64, 64)),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
    # Instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # Instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["test"])
    # Instantiate the test dataset
test_data_set = MyDataSet(images_path=test_images_path,
images_class=test_images_label,
transform=data_transform["test"])
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
test_loader = torch.utils.data.DataLoader(test_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
    # Load pre-trained weights if they exist
    patience = 25  # stop if the validation metric does not improve for 25 epochs
    early_stopping = EarlyStopping(patience, verbose=True)
model = create_model(num_classes=args.num_classes).to(device)
if os.path.exists(args.weights):
weights_dict = torch.load(args.weights, map_location=device)
load_weights_dict = {k: v for k, v in weights_dict.items()
if model.state_dict()[k].numel() == v.numel()}
print(model.load_state_dict(load_weights_dict, strict=False))
pg = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_val_acc = 0.0
    best_model = None
for epoch in range(1000):
# train
mean_loss = train_one_epoch(model=model,
optimizer=optimizer,
data_loader=train_loader,
device=device,
epoch=epoch)
scheduler.step()
# validate
val_acc, val_loss= evaluate(model=model,
data_loader=val_loader,
device=device)
tags = ["loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], mean_loss, epoch)
tb_writer.add_scalar(tags[1], val_acc, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
if val_acc > best_val_acc:
best_model = model
best_val_acc=val_acc
torch.save(best_model.state_dict(), "./weights/resnet26/bestmodel-{}.pth".format(num_model, epoch))
print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3)))
early_stopping(val_loss, val_acc, model)
if early_stopping.early_stop:
print("epoch = {}".format(epoch))
break
# torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch))
# test
    test_acc, test_loss = evaluate(model=best_model,
                                   data_loader=test_loader,
                                   device=device)
print("best test accuracy: {}".format(round(test_acc, 3)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--num_classes', type=int, default=5)
parser.add_argument('--epochs', type=int, default=150)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--lr', type=float, default=0.005)
parser.add_argument('--lrf', type=float, default=0.01)
parser.add_argument('--weights', type=str, default='',
help='initial weights path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 7,618 | 44.35119 | 132 | py |
HIWL | HIWL-main/noscheme/train_resnet.py | import os
import math
import argparse
import sys
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
from model_resnet import resnet34, resnet50, resnet101
from my_dataset import MyDataSet
from utils import read_split_data, train_one_epoch, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/resnet', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
print(args)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter()
num_model = args.num_model
if num_model == 'resnet34':
create_model = resnet34
if num_model == 'resnet50':
create_model = resnet50
if num_model == 'resnet101':
create_model = resnet101
if os.path.exists("./weights/{}".format(num_model)) is False:
os.makedirs("./weights/{}".format(num_model))
if os.path.exists("./log") is False:
os.makedirs("./log")
sys.stdout = Logger(filename='./log/{}'.format(num_model), stream=sys.stdout)
    # Labels 0, 1, 2, 3, 4 correspond to: in-between, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
                                     transforms.RandomRotation((-25, 25)),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
    # Instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # Instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["test"])
    # Instantiate the test dataset
test_data_set = MyDataSet(images_path=test_images_path,
images_class=test_images_label,
transform=data_transform["test"])
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
test_loader = torch.utils.data.DataLoader(test_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
    # Load pre-trained weights if they exist
    patience = 25  # stop if the validation metric does not improve for 25 epochs
    early_stopping = EarlyStopping(patience, verbose=True)
model = create_model(num_classes=args.num_classes).to(device)
if os.path.exists(args.weights):
weights_dict = torch.load(args.weights, map_location=device)
load_weights_dict = {k: v for k, v in weights_dict.items()
if model.state_dict()[k].numel() == v.numel()}
print(model.load_state_dict(load_weights_dict, strict=False))
pg = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_val_acc = 0.0
    best_model = None
for epoch in range(1000):
# train
mean_loss = train_one_epoch(model=model,
optimizer=optimizer,
data_loader=train_loader,
device=device,
epoch=epoch)
scheduler.step()
# validate
val_acc, val_loss= evaluate(model=model,
data_loader=val_loader,
device=device)
tags = ["loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], mean_loss, epoch)
tb_writer.add_scalar(tags[1], val_acc, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
if val_acc > best_val_acc:
best_model = model
best_val_acc=val_acc
torch.save(best_model.state_dict(), "./weights/{}/bestmodel-{}.pth".format(num_model, epoch))
print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3)))
early_stopping(val_loss, val_acc, model)
if early_stopping.early_stop:
print("epoch = {}".format(epoch))
break
# torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch))
# test
    test_acc, test_loss = evaluate(model=best_model,
                                   data_loader=test_loader,
                                   device=device)
print("best test accuracy: {}".format(round(test_acc, 3)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--num_classes', type=int, default=5)
parser.add_argument('--epochs', type=int, default=150)
parser.add_argument('--batch-size', type=int, default=24)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lrf', type=float, default=0.01)
parser.add_argument('--weights', type=str, default='',
help='initial weights path')
parser.add_argument('--freeze-layers', type=bool, default=False)
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
parser.add_argument('--num-model', default='resnet50', help='resnet34-101')
opt = parser.parse_args()
main(opt)
| 7,886 | 44.327586 | 147 | py |
HIWL | HIWL-main/noscheme/utils.py | import os
import sys
import json
import pickle
import random
import numpy as np
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
# Cross-entropy loss with label smoothing built into the loss function
class SMLoss(nn.Module):
''' Cross Entropy Loss with label smoothing '''
def __init__(self, label_smooth=None, class_num=137):
super().__init__()
self.label_smooth = label_smooth
self.class_num = class_num
def forward(self, pred, target):
'''
Args:
pred: prediction of model output [N, M]
target: ground truth of sampler [N]
'''
eps = 1e-12
if self.label_smooth is not None:
# cross entropy loss with label smoothing
logprobs = F.log_softmax(pred, dim=1) # softmax + log
            target = F.one_hot(target, self.class_num)  # convert to one-hot
            # apply label smoothing
target = torch.clamp(target.float(), min=self.label_smooth / (self.class_num - 1),
max=1.0 - self.label_smooth)
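            # Worked numbers for the call sites in this repo (label_smooth=0.05,
            # class_num=5): each wrong class is clamped up to 0.05 / 4 = 0.0125
            # and the true class down to 1 - 0.05 = 0.95, still summing to 1.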
loss = -1 * torch.sum(target * logprobs, 1)
else:
# standard cross entropy loss
loss = -1. * pred.gather(1, target.unsqueeze(-1)) + torch.log(torch.exp(pred + eps).sum(dim=1))
return loss.mean()
def read_split_data(root: str, val_rate: float = 0.1, test_rate: float = 0.1):
split_rate = val_rate + test_rate
    random.seed(0)  # fix the seed so the split is reproducible
assert os.path.exists(root), "dataset root: {} does not exist.".format(root)
    # Traverse the folders; each sub-folder corresponds to one class
    galaxy_class = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))]
    split_galaxy_class = []  # class names after splitting
for i in galaxy_class:
split_galaxy_class.append(i + '_train')
split_galaxy_class.append(i + '_test')
    # sort to keep the order consistent
galaxy_class.sort()
    # map each class name to a numeric index
class_indices = dict((k, v) for v, k in enumerate(galaxy_class))
json_str = json.dumps(dict((val, key) for key, val in class_indices.items()), indent=4)
with open('class_indices.json', 'w') as json_file:
json_file.write(json_str)
    train_images_path = []  # paths of all training images
    train_images_label = []  # class indices of the training images
    val_images_path = []  # paths of all validation images
    val_images_label = []  # class indices of the validation images
    test_images_path = []  # paths of all test images
    test_images_label = []  # class indices of the test images
    every_class_num = []  # total sample count per class
    split_every_class_num = []  # per-class sample counts after splitting
    supported = [".jpg", ".JPG", ".png", ".PNG"]  # supported file extensions
    # iterate over the files in each class folder
for cla in galaxy_class:
cla_path = os.path.join(root, cla)
sample_count = 0
        # collect all file paths whose extension is in supported
images = [os.path.join(root, cla, i) for i in os.listdir(cla_path)
if os.path.splitext(i)[-1] in supported]
        # index corresponding to this class
image_class = class_indices[cla]
        # record the number of samples for this class
every_class_num.append(len(images))
        # randomly sample the held-out (val + test) images for this class
split_path = random.sample(images, round(len(images) * split_rate))
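        # With the defaults val_rate=0.1 and test_rate=0.1, 20% of each class
        # is sampled here; the first half of the sampled images encountered
        # below goes to validation and the rest to test (an 8:1:1 split).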
for img_path in images:
            if img_path in split_path:  # if the path was sampled, assign it to val or test
sample_count += 1
if sample_count <= len(split_path)*(val_rate/split_rate):
val_images_path.append(img_path)
val_images_label.append(image_class)
else:
test_images_path.append(img_path)
test_images_label.append(image_class)
            else:  # otherwise it goes to the training set
train_images_path.append(img_path)
train_images_label.append(image_class)
print("{} images were found in the dataset.".format(sum(every_class_num)))
print("{} images for training.".format(len(train_images_path)))
print("{} images for val.".format(len(val_images_path)))
print("{} images for test.".format(len(test_images_path)))
plot_image = False
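    # Note: split_every_class_num is never populated above, so enabling
    # plot_image as written would draw an empty chart.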
if plot_image:
        # bar chart of the per-class counts
plt.bar(range(len(split_every_class_num)), split_every_class_num, align='center')
        # replace the x ticks with the class names
plt.xticks(range(len(split_every_class_num)),split_galaxy_class)
        # annotate each bar with its count
for i, v in enumerate(split_every_class_num):
plt.text(x=i, y=v + 5, s=str(v), ha='center')
        # x-axis label
        plt.xlabel('image class')
        # y-axis label
        plt.ylabel('number of images')
        # chart title
        plt.title('galaxy class distribution')
plt.show()
return train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label
def plot_data_loader_image(data_loader):
batch_size = data_loader.batch_size
plot_num = min(batch_size, 4)
json_path = './class_indices.json'
assert os.path.exists(json_path), json_path + " does not exist."
json_file = open(json_path, 'r')
class_indices = json.load(json_file)
for data in data_loader:
images, labels = data
for i in range(plot_num):
# [C, H, W] -> [H, W, C]
img = images[i].numpy().transpose(1, 2, 0)
            # undo the Normalize transform
img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255
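            # Note: these are the ImageNet statistics; the transforms in this
            # repo normalize with mean [0.046, 0.041, 0.030] and std
            # [0.090, 0.075, 0.065], so images restored here will look off.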
label = labels[i].item()
plt.subplot(1, plot_num, i+1)
plt.xlabel(class_indices[str(label)])
            plt.xticks([])  # remove x-axis ticks
            plt.yticks([])  # remove y-axis ticks
plt.imshow(img.astype('uint8'))
plt.show()
def write_pickle(list_info: list, file_name: str):
with open(file_name, 'wb') as f:
pickle.dump(list_info, f)
def read_pickle(file_name: str) -> list:
with open(file_name, 'rb') as f:
info_list = pickle.load(f)
return info_list
def train_one_epoch(model, optimizer, data_loader, device, epoch):
model.train()
# loss_function = torch.nn.CrossEntropyLoss()
loss_function = SMLoss(label_smooth=0.05, class_num=5)
mean_loss = torch.zeros(1).to(device)
optimizer.zero_grad()
data_loader = tqdm(data_loader)
for step, data in enumerate(data_loader):
images, labels = data
pred = model(images.to(device))
loss = loss_function(pred, labels.to(device))
loss.backward()
mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses
data_loader.desc = "[epoch {}] mean loss {}".format(epoch, round(mean_loss.item(), 3))
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss)
sys.exit(1)
optimizer.step()
optimizer.zero_grad()
print('traloss: {}'.format(mean_loss.item()))
return mean_loss.item()
def train_googlenet_one_epoch(model, optimizer, data_loader, device, epoch):
model.train()
loss_function = torch.nn.CrossEntropyLoss()
    # label smoothing variant (disabled):
    # loss_function = SMLoss(label_smooth=0.05, class_num=int(str.split('l')[1]))
mean_loss = torch.zeros(1).to(device)
optimizer.zero_grad()
data_loader = tqdm(data_loader)
for step, data in enumerate(data_loader):
images, labels = data
logits, aux_logits2, aux_logits1 = model(images.to(device))
loss0 = loss_function(logits, labels.to(device))
loss1 = loss_function(aux_logits1, labels.to(device))
loss2 = loss_function(aux_logits2, labels.to(device))
loss = loss0 + loss1 * 0.3 + loss2 * 0.3
loss.backward()
mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses
data_loader.desc = "[epoch {}] mean loss {}".format(epoch, round(mean_loss.item(), 3))
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss)
sys.exit(1)
optimizer.step()
optimizer.zero_grad()
print('traloss: {}'.format(mean_loss.item()))
return mean_loss.item()
@torch.no_grad()
def evaluate(model, data_loader, device):
model.eval()
    # total number of validation samples
total_num = len(data_loader.dataset)
    # counter for correctly predicted samples
loss_function = torch.nn.CrossEntropyLoss()
sum_num = torch.zeros(1).to(device)
data_loader = tqdm(data_loader)
mean_loss = torch.zeros(1).to(device)
for step, data in enumerate(data_loader):
images, labels = data
pred = model(images.to(device))
loss = loss_function(pred, labels.to(device))
pred_label = torch.max(pred, dim=1)[1]
sum_num += torch.eq(pred_label, labels.to(device)).sum()
mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses
acc = sum_num.item() / total_num
return acc, mean_loss.item()
@torch.no_grad()
def evaluate_noh(model, data_loader, device):
model.eval()
mean_loss = torch.zeros(1).to(device)
loss_function = torch.nn.CrossEntropyLoss()
    # counter for correctly predicted samples
sum_num = torch.zeros(1).to(device)
pred_all = torch.tensor([]).to(device)
data_loader = tqdm(data_loader)
for step, data in enumerate(data_loader):
images, labels = data
pred = model(images.to(device))
loss = loss_function(pred, labels.to(device))
mean_loss = (mean_loss * step + loss.detach()) / (step + 1)
pred = torch.max(pred, dim=1)[1]
sum_num += torch.eq(pred, labels.to(device)).sum()
pred_all = torch.cat([pred_all, pred.to(device)], dim=0)
print('valloss: {}'.format(mean_loss.item()))
return sum_num.item(), pred_all | 9,482 | 34.384328 | 120 | py |
HIWL | HIWL-main/noscheme/train_googlenet.py | import os
import math
import argparse
import sys
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
from model_googlenet import GoogLeNet as create_model
from my_dataset import MyDataSet
from utils import read_split_data, train_googlenet_one_epoch, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/googlenet', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
num_model = 'googlenet'
print(args)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter()
if os.path.exists("./weights/googlenet") is False:
os.makedirs("./weights/googlenet")
if os.path.exists("./log") is False:
os.makedirs("./log")
sys.stdout = Logger(stream=sys.stdout)
    # Labels 0, 1, 2, 3, 4 correspond to: in-between, cigar-shaped, edge-on, round, and spiral galaxies
train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
                                     transforms.RandomRotation((-25, 25)),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
    # instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["test"])
    # instantiate the test dataset
test_data_set = MyDataSet(images_path=test_images_path,
images_class=test_images_label,
transform=data_transform["test"])
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
test_loader = torch.utils.data.DataLoader(test_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
                                              collate_fn=test_data_set.collate_fn)
# 如果存在预训练权重则载入
patience = 25 #25个epoch内验证精度不下降
early_stopping= EarlyStopping(patience, verbose=True)
model = create_model(num_classes=args.num_classes).to(device)
if os.path.exists(args.weights):
weights_dict = torch.load(args.weights, map_location=device)
load_weights_dict = {k: v for k, v in weights_dict.items()
if model.state_dict()[k].numel() == v.numel()}
print(model.load_state_dict(load_weights_dict, strict=False))
pg = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine
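    # Schedule sanity check (illustrative): lf(0) == 1.0 and
    # lf(args.epochs) == args.lrf, so the lr decays along a half cosine from
    # args.lr down to args.lr * args.lrf. Note the loop below may run past
    # args.epochs (until early stopping), where the cosine term rises again.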
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_val_acc = 0.0
    best_model = None
for epoch in range(1000):
# train
mean_loss = train_googlenet_one_epoch(model=model,
optimizer=optimizer,
data_loader=train_loader,
device=device,
epoch=epoch)
scheduler.step()
# validate
        val_acc, val_loss = evaluate(model=model,
data_loader=val_loader,
device=device)
tags = ["loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], mean_loss, epoch)
tb_writer.add_scalar(tags[1], val_acc, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
if val_acc > best_val_acc:
best_model = model
            best_val_acc = val_acc
            torch.save(best_model.state_dict(), "./weights/{}/bestmodel-{}.pth".format(num_model, epoch))
        print("[epoch {}] val_acc: {} best_acc: {}".format(epoch, round(val_acc, 3), round(best_val_acc, 3)))
early_stopping(val_loss, val_acc, model)
if early_stopping.early_stop:
print("epoch = {}".format(epoch))
break
# torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch))
# test
    test_acc, test_loss = evaluate(model=best_model,
data_loader=test_loader,
device=device)
print("best test accuracy: {}".format(round(test_acc, 3)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--num_classes', type=int, default=5)
parser.add_argument('--epochs', type=int, default=150)
parser.add_argument('--batch-size', type=int, default=24)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lrf', type=float, default=0.01)
parser.add_argument('--weights', type=str, default='',
help='initial weights path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 7,500 | 44.460606 | 147 | py |
HIWL | HIWL-main/noscheme/train_vgg.py | import os
import math
import argparse
import sys
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
from model_vgg import vgg as create_model
from my_dataset import MyDataSet
from utils import read_split_data, train_one_epoch, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/vgg', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
print(args)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter()
if os.path.exists("./weights/vgg") is False:
os.makedirs("./weights/vgg")
if os.path.exists("./log") is False:
os.makedirs("./log")
sys.stdout = Logger(stream=sys.stdout)
    # labels 0,1,2,3,4 correspond to in-between, cigar-shaped, edge-on, round and spiral galaxies, respectively
train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25)),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(256),
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
    # instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["test"])
    # instantiate the test dataset
test_data_set = MyDataSet(images_path=test_images_path,
images_class=test_images_label,
transform=data_transform["test"])
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
test_loader = torch.utils.data.DataLoader(test_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
                                              collate_fn=test_data_set.collate_fn)
    patience = 25  # early-stop if the validation metric does not improve for 25 epochs
    early_stopping = EarlyStopping(patience, verbose=True)
    model = create_model(num_classes=args.num_classes).to(device)
    # load pre-trained weights if a checkpoint path is given
if os.path.exists(args.weights):
weights_dict = torch.load(args.weights, map_location=device)
load_weights_dict = {k: v for k, v in weights_dict.items()
if model.state_dict()[k].numel() == v.numel()}
print(model.load_state_dict(load_weights_dict, strict=False))
pg = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_val_acc = 0.0
    best_model = None
for epoch in range(1000):
# train
mean_loss = train_one_epoch(model=model,
optimizer=optimizer,
data_loader=train_loader,
device=device,
epoch=epoch)
scheduler.step()
# validate
        val_acc, val_loss = evaluate(model=model,
data_loader=val_loader,
device=device)
tags = ["loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], mean_loss, epoch)
tb_writer.add_scalar(tags[1], val_acc, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
if val_acc > best_val_acc:
best_model = model
            best_val_acc = val_acc
            torch.save(best_model.state_dict(), "./weights/vgg/bestmodel-{}.pth".format(epoch))
        print("[epoch {}] val_acc: {} best_acc: {}".format(epoch, round(val_acc, 3), round(best_val_acc, 3)))
early_stopping(val_loss, val_acc, model)
if early_stopping.early_stop:
print("epoch = {}".format(epoch))
break
# torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch))
# test
    test_acc, test_loss = evaluate(model=best_model,
data_loader=test_loader,
device=device)
print("best test accuracy: {}".format(round(test_acc, 3)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--num_classes', type=int, default=5)
parser.add_argument('--epochs', type=int, default=150)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--lrf', type=float, default=0.01)
parser.add_argument('--data-path', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--weights', type=str, default='',
help='initial weights path')
parser.add_argument('--freeze-layers', type=bool, default=False)
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 7,542 | 43.370588 | 147 | py |
HIWL | HIWL-main/noscheme/model_resnet26.py | import torch.nn as nn
import torch
#In the original TF code there is a BN before the global pooling. When the channel depth changes, the input first passes through BN-ReLU and that result becomes the shortcut; when the depth is unchanged, the shortcut directly downsamples the input (max pooling, kernel 1x1, stride s).
class BasicBlock(nn.Module):
def __init__(self, m, k=2, dropoutrate=0.2, istop : bool = False,isbottom : bool = False):
super(BasicBlock, self).__init__()
self.in_channel = m*k * 2
self.out_channel = m*k * 4
self.istop=istop
self.isbottom=isbottom
if self.istop:
self.in_channel = 64
self.conv1 = nn.Conv2d(in_channels=self.in_channel, out_channels=m*k, kernel_size=1,
stride=1, padding=0)
self.conv2 = nn.Conv2d(in_channels=m*k, out_channels=m*k, kernel_size=3,
stride=1, padding=1)
self.conv3 = nn.Conv2d(in_channels=m*k, out_channels=self.out_channel, kernel_size=1,
stride=1, padding=0)
self.conv4 = nn.Conv2d(in_channels=self.out_channel, out_channels=m*k, kernel_size=1,
stride=1, padding=0)
self.conv5 = nn.Conv2d(in_channels=m*k, out_channels=m*k, kernel_size=3,
stride=1, padding=1)
self.conv6 = nn.Conv2d(in_channels=m*k, out_channels=self.out_channel, kernel_size=1,
stride=2, padding=0)
        self.convshortcut1 = nn.Conv2d(in_channels=self.in_channel, out_channels=self.out_channel, kernel_size=1,  # raise dimension
                                       padding=0, stride=1)
        self.convshortcut2 = nn.MaxPool2d(kernel_size=2, stride=2)  # downsample
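        # NOTE: bnmk and bnoutc below are each applied at several points in
        # forward(), so their affine parameters and running statistics are
        # shared across those call sites.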
self.bninc = nn.BatchNorm2d(self.in_channel)
self.bnmk = nn.BatchNorm2d(m*k)
self.bnoutc = nn.BatchNorm2d(self.out_channel)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropoutrate)
if self.isbottom:
self.conv6 = nn.Conv2d(in_channels=m * k, out_channels=self.out_channel, kernel_size=1,
stride=1, padding=0)
def forward(self, x):
        # first sub-block
# identity1 = self.bninc(x)
out = self.bninc(x)
out = self.relu(out)
identity1 = out
out = self.conv1(out)
out = self.bnmk(out)
out = self.relu(out)
out = self.conv2(out)
out = self.dropout(out)
out = self.bnmk(out)
out = self.relu(out)
out = self.conv3(out)
out += self.convshortcut1(identity1)
        # second sub-block
# identity2 = self.bnoutc(out)
identity2 = out
out = self.bnoutc(out)
out = self.relu(out)
out = self.conv4(out)
out = self.bnmk(out)
out = self.relu(out)
out = self.conv5(out)
out = self.dropout(out)
out = self.bnmk(out)
out = self.relu(out)
out = self.conv6(out)
if self.isbottom:
out += identity2
out = self.bnoutc(out)
else:
out += self.convshortcut2(identity2)
return out
class ResNet26(nn.Module):
def __init__(self,
block,
mlist,
# mlist=[32, 64, 128, 256],
k,
dropoutrate,
num_classes
):
super(ResNet26, self).__init__()
self.pad=nn.ZeroPad2d(padding=(2, 3, 2, 3))
self.conv1x = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=6, stride=1)
self.maxpool = nn.MaxPool2d(kernel_size=1, stride=2)
self.conv2to5x = self._make_layer(block, mlist, k, dropoutrate)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(mlist[-1]*k*4, num_classes)
self.fc = nn.Sequential(
# nn.Dropout(p=0.3),
nn.Conv2d(in_channels=mlist[-1]*k*4,out_channels=num_classes, kernel_size=1)
)
def forward(self, x):
out = self.pad(x)
out = self.conv1x(out)
out = self.maxpool(out)
out = self.conv2to5x(out)
out = self.avgpool(out)
# out = torch.flatten(out, start_dim=1)
out = self.fc(out)
out = torch.flatten(out, start_dim=1, end_dim=3)
return out
def _make_layer(self, block, mlist, k, dropoutrate):
layers = []
for i in range(len(mlist)):
if i == 0:
layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop= True, isbottom=False))
elif (i == len(mlist)-1):
layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop=False, isbottom=True))
else:
layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop=False, isbottom=False))
return nn.Sequential(*layers)
def resnet26(block=BasicBlock, mlist=[64, 128, 256, 512], k=2, dropoutrate=0.35, num_classes=5):
return ResNet26(block=block, mlist=mlist, k=k, dropoutrate=dropoutrate, num_classes=num_classes)
#inspect the detailed network structure and parameter counts:
# from torchinfo import summary
# summary(resnet26().cuda(),(16,3,64,64))
| 4,994 | 37.72093 | 128 | py |
HIWL | HIWL-main/noscheme/model_efficientnet.py | import math
import copy
from functools import partial
from collections import OrderedDict
from typing import Optional, Callable
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import functional as F
def _make_divisible(ch, divisor=8, min_ch=None):
if min_ch is None:
min_ch = divisor
new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_ch < 0.9 * ch:
new_ch += divisor
return new_ch
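# Worked examples (illustrative): _make_divisible(18) -> 24, because rounding
# to the nearest multiple of 8 gives 16, and 16 < 0.9 * 18, so one divisor is
# added back. adjust_channels(32, 1.1) below calls _make_divisible(35.2) -> 32
# (32 >= 0.9 * 35.2, so no bump is needed).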
class ConvBNActivation(nn.Sequential):
def __init__(self,
in_planes: int,
out_planes: int,
kernel_size: int = 3,
stride: int = 1,
groups: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation_layer: Optional[Callable[..., nn.Module]] = None):
padding = (kernel_size - 1) // 2
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if activation_layer is None:
activation_layer = nn.SiLU # alias Swish (torch>=1.7)
super(ConvBNActivation, self).__init__(nn.Conv2d(in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False),
norm_layer(out_planes),
activation_layer())
class SqueezeExcitation(nn.Module):
def __init__(self,
input_c: int, # block input channel
expand_c: int, # block expand channel
squeeze_factor: int = 4):
super(SqueezeExcitation, self).__init__()
squeeze_c = input_c // squeeze_factor
self.fc1 = nn.Conv2d(expand_c, squeeze_c, 1)
self.ac1 = nn.SiLU() # alias Swish
self.fc2 = nn.Conv2d(squeeze_c, expand_c, 1)
self.ac2 = nn.Sigmoid()
def forward(self, x: Tensor) -> Tensor:
scale = F.adaptive_avg_pool2d(x, output_size=(1, 1))
scale = self.fc1(scale)
scale = self.ac1(scale)
scale = self.fc2(scale)
scale = self.ac2(scale)
return scale * x
class InvertedResidualConfig:
# kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate
def __init__(self,
kernel: int, # 3 or 5
input_c: int,
out_c: int,
expanded_ratio: int, # 1 or 6
stride: int, # 1 or 2
use_se: bool, # True
drop_rate: float,
index: str, # 1a, 2a, 2b, ...
width_coefficient: float):
self.input_c = self.adjust_channels(input_c, width_coefficient)
self.kernel = kernel
self.expanded_c = self.input_c * expanded_ratio
self.out_c = self.adjust_channels(out_c, width_coefficient)
self.use_se = use_se
self.stride = stride
self.drop_rate = drop_rate
self.index = index
@staticmethod
def adjust_channels(channels: int, width_coefficient: float):
return _make_divisible(channels * width_coefficient, 8)
class InvertedResidual(nn.Module):
def __init__(self,
cnf: InvertedResidualConfig,
norm_layer: Callable[..., nn.Module]):
super(InvertedResidual, self).__init__()
if cnf.stride not in [1, 2]:
raise ValueError("illegal stride value.")
self.use_res_connect = (cnf.stride == 1 and cnf.input_c == cnf.out_c)
layers = OrderedDict()
activation_layer = nn.SiLU # alias Swish
# expand
if cnf.expanded_c != cnf.input_c:
layers.update({"expand_conv": ConvBNActivation(cnf.input_c,
cnf.expanded_c,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=activation_layer)})
# depthwise
layers.update({"dwconv": ConvBNActivation(cnf.expanded_c,
cnf.expanded_c,
kernel_size=cnf.kernel,
stride=cnf.stride,
groups=cnf.expanded_c,
norm_layer=norm_layer,
activation_layer=activation_layer)})
if cnf.use_se:
layers.update({"se": SqueezeExcitation(cnf.input_c,
cnf.expanded_c)})
# project
layers.update({"project_conv": ConvBNActivation(cnf.expanded_c,
cnf.out_c,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=nn.Identity)})
self.block = nn.Sequential(layers)
self.out_channels = cnf.out_c
self.is_strided = cnf.stride > 1
        # the dropout layer is only used when the shortcut (residual) connection is active
if self.use_res_connect and cnf.drop_rate > 0:
self.dropout = nn.Dropout2d(p=cnf.drop_rate, inplace=True)
else:
self.dropout = nn.Identity()
def forward(self, x: Tensor) -> Tensor:
result = self.block(x)
result = self.dropout(result)
if self.use_res_connect:
result += x
return result
class EfficientNet(nn.Module):
def __init__(self,
width_coefficient: float,
depth_coefficient: float,
num_classes: int = 1000,
dropout_rate: float = 0.2,
drop_connect_rate: float = 0.2,
block: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
):
super(EfficientNet, self).__init__()
# kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate, repeats
default_cnf = [[3, 32, 16, 1, 1, True, drop_connect_rate, 1],
[3, 16, 24, 6, 2, True, drop_connect_rate, 2],
[5, 24, 40, 6, 2, True, drop_connect_rate, 2],
[3, 40, 80, 6, 2, True, drop_connect_rate, 3],
[5, 80, 112, 6, 1, True, drop_connect_rate, 3],
[5, 112, 192, 6, 2, True, drop_connect_rate, 4],
[3, 192, 320, 6, 1, True, drop_connect_rate, 1]]
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
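        # e.g. with depth_coefficient=1.2 (the B2 setting), a stage listed
        # with 3 repeats becomes ceil(1.2 * 3) = 4 blocks (illustrative).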
if block is None:
block = InvertedResidual
if norm_layer is None:
norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.1)
adjust_channels = partial(InvertedResidualConfig.adjust_channels,
width_coefficient=width_coefficient)
# build inverted_residual_setting
bneck_conf = partial(InvertedResidualConfig,
width_coefficient=width_coefficient)
b = 0
num_blocks = float(sum(round_repeats(i[-1]) for i in default_cnf))
inverted_residual_setting = []
for stage, args in enumerate(default_cnf):
cnf = copy.copy(args)
for i in range(round_repeats(cnf.pop(-1))):
if i > 0:
# strides equal 1 except first cnf
cnf[-3] = 1 # strides
cnf[1] = cnf[2] # input_channel equal output_channel
cnf[-1] *= b / num_blocks # update dropout ratio
index = str(stage + 1) + chr(i + 97) # 1a, 2a, 2b, ...
inverted_residual_setting.append(bneck_conf(*cnf, index))
b += 1
# create layers
layers = OrderedDict()
# first conv
layers.update({"stem_conv": ConvBNActivation(in_planes=3,
out_planes=adjust_channels(32),
kernel_size=3,
stride=2,
norm_layer=norm_layer)})
# building inverted residual blocks
for cnf in inverted_residual_setting:
layers.update({cnf.index: block(cnf, norm_layer)})
# build top
last_conv_input_c = inverted_residual_setting[-1].out_c
last_conv_output_c = adjust_channels(1280)
layers.update({"top": ConvBNActivation(in_planes=last_conv_input_c,
out_planes=last_conv_output_c,
kernel_size=1,
norm_layer=norm_layer)})
self.features = nn.Sequential(layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)
classifier = []
if dropout_rate > 0:
classifier.append(nn.Dropout(p=dropout_rate, inplace=True))
classifier.append(nn.Linear(last_conv_output_c, num_classes))
self.classifier = nn.Sequential(*classifier)
# initial weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def efficientnet_b0(num_classes=1000):
# input image size 224x224
return EfficientNet(width_coefficient=1.0,
depth_coefficient=1.0,
dropout_rate=0.2,
num_classes=num_classes)
def efficientnet_b1(num_classes=1000):
# input image size 240x240
return EfficientNet(width_coefficient=1.0,
depth_coefficient=1.1,
dropout_rate=0.2,
num_classes=num_classes)
def efficientnet_b2(num_classes=1000):
# input image size 260x260
return EfficientNet(width_coefficient=1.1,
depth_coefficient=1.2,
dropout_rate=0.3,
num_classes=num_classes)
def efficientnet_b3(num_classes=1000):
# input image size 300x300
return EfficientNet(width_coefficient=1.2,
depth_coefficient=1.4,
dropout_rate=0.3,
num_classes=num_classes)
def efficientnet_b4(num_classes=1000):
# input image size 380x380
return EfficientNet(width_coefficient=1.4,
depth_coefficient=1.8,
dropout_rate=0.4,
num_classes=num_classes)
def efficientnet_b5(num_classes=1000):
# input image size 456x456
return EfficientNet(width_coefficient=1.6,
depth_coefficient=2.2,
dropout_rate=0.4,
num_classes=num_classes)
def efficientnet_b6(num_classes=1000):
# input image size 528x528
return EfficientNet(width_coefficient=1.8,
depth_coefficient=2.6,
dropout_rate=0.5,
num_classes=num_classes)
def efficientnet_b7(num_classes=1000):
# input image size 600x600
return EfficientNet(width_coefficient=2.0,
depth_coefficient=3.1,
dropout_rate=0.5,
num_classes=num_classes)
| 12,752 | 37.645455 | 102 | py |
HIWL | HIWL-main/noscheme/model_dieleman.py | import torch.nn as nn
import torch
class Dieleman(nn.Module):
def __init__(self, num_classes=1000):
super(Dieleman, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=6, bias=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(32, 64, kernel_size=5, bias=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 128, kernel_size=3, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, bias=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(2 * 2 * 128, 2048, bias=True),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(2048, 2048, bias=True),
nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes, bias=True),
)
# if init_weights:
# self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = torch.flatten(x, start_dim=1)
x = self.classifier(x)
return x
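# Shape trace for the expected 45x45 input (a sanity check, not part of the
# original source): 45 -> conv6 -> 40 -> pool/2 -> 20 -> conv5 -> 16 ->
# pool/2 -> 8 -> conv3 -> 6 -> conv3 -> 4 -> pool/2 -> 2, matching the
# 2*2*128 features consumed by the first Linear layer.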
| 1,269 | 31.564103 | 58 | py |
HIWL | HIWL-main/noscheme/train_efficientnet.py | import os
import math
import argparse
import sys
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
from model_efficientnet import efficientnet_b0, efficientnet_b1, efficientnet_b2
from my_dataset import MyDataSet
from utils import read_split_data, train_one_epoch, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/efficientnet', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
print(args)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter()
img_size = {"B0": 224,
"B1": 240,
"B2": 260,
"B3": 300,
"B4": 380,
"B5": 456,
"B6": 528,
"B7": 600}
num_model = args.num_model
if num_model == 'B0':
create_model = efficientnet_b0
if num_model == 'B1':
create_model = efficientnet_b1
if num_model == 'B2':
create_model = efficientnet_b2
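    # NOTE: only B0-B2 are mapped here; any other --num-model value leaves
    # create_model undefined (extending this chain for B3-B7 is left open).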
if os.path.exists("./weights/{}".format(num_model)) is False:
os.makedirs("./weights/{}".format(num_model))
if os.path.exists("./log") is False:
os.makedirs("./log")
sys.stdout = Logger(filename='./log/{}'.format(num_model), stream=sys.stdout)
    # labels 0,1,2,3,4 correspond to in-between, cigar-shaped, edge-on, round and spiral galaxies, respectively
train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
                                     transforms.RandomRotation((-25, 25)),
transforms.RandomResizedCrop(img_size[num_model], scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(img_size[num_model]),
                                    transforms.Resize(260),  # NOTE: hard-coded to the B2 resolution (260); B0/B1 crops get upscaled
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
    # instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["test"])
    # instantiate the test dataset
test_data_set = MyDataSet(images_path=test_images_path,
images_class=test_images_label,
transform=data_transform["test"])
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
test_loader = torch.utils.data.DataLoader(test_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
                                              collate_fn=test_data_set.collate_fn)
    patience = 25  # early-stop if the validation metric does not improve for 25 epochs
    early_stopping = EarlyStopping(patience, verbose=True)
    model = create_model(num_classes=args.num_classes).to(device)
    # load pre-trained weights if a checkpoint path is given
if os.path.exists(args.weights):
weights_dict = torch.load(args.weights, map_location=device)
load_weights_dict = {k: v for k, v in weights_dict.items()
if model.state_dict()[k].numel() == v.numel()}
print(model.load_state_dict(load_weights_dict, strict=False))
pg = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_val_acc = 0.0
    best_model = None
for epoch in range(1000):
# train
mean_loss = train_one_epoch(model=model,
optimizer=optimizer,
data_loader=train_loader,
device=device,
epoch=epoch)
scheduler.step()
# validate
        val_acc, val_loss = evaluate(model=model,
data_loader=val_loader,
device=device)
tags = ["loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], mean_loss, epoch)
tb_writer.add_scalar(tags[1], val_acc, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
if val_acc > best_val_acc:
best_model = model
            best_val_acc = val_acc
            torch.save(best_model.state_dict(), "./weights/{}/bestmodel-{}.pth".format(num_model, epoch))
        print("[epoch {}] val_acc: {} best_acc: {}".format(epoch, round(val_acc, 3), round(best_val_acc, 3)))
early_stopping(val_loss, val_acc, model)
if early_stopping.early_stop:
print("epoch = {}".format(epoch))
break
# test
    test_acc, test_loss = evaluate(model=best_model,
data_loader=test_loader,
device=device)
print("best test accuracy: {}".format(round(test_acc, 3)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--num_classes', type=int, default=5)
parser.add_argument('--epochs', type=int, default=150)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lrf', type=float, default=0.01)
parser.add_argument('--weights', type=str, default=r'',
help='initial weights path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
    parser.add_argument('--num-model', default='B2', help='B0, B1 or B2 (only these are mapped in main)')
opt = parser.parse_args()
main(opt)
| 8,040 | 42.939891 | 147 | py |
HIWL | HIWL-main/noscheme/train_dieleman.py | import os
import math
import argparse
import sys
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import torch.optim.lr_scheduler as lr_scheduler
from model_dieleman import Dieleman as create_model
from my_dataset import MyDataSet
from utils import read_split_data, train_one_epoch, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/dieleman', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    num_model = 'dieleman'
print(args)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
tb_writer = SummaryWriter()
if os.path.exists("./weights/dieleman") is False:
os.makedirs("./weights/dieleman")
if os.path.exists("./log") is False:
os.makedirs("./log")
sys.stdout = Logger(stream=sys.stdout)
    # labels 0,1,2,3,4 correspond to in-between, cigar-shaped, edge-on, round and spiral galaxies, respectively
train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
                                     transforms.RandomRotation((-25, 25)),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.Resize((45, 45)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.Resize((45, 45)),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
    # instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["test"])
    # instantiate the test dataset
test_data_set = MyDataSet(images_path=test_images_path,
images_class=test_images_label,
transform=data_transform["test"])
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
test_loader = torch.utils.data.DataLoader(test_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
                                              collate_fn=test_data_set.collate_fn)
    patience = 25  # early-stop if the validation metric does not improve for 25 epochs
    early_stopping = EarlyStopping(patience, verbose=True)
    model = create_model(num_classes=args.num_classes).to(device)
    # load pre-trained weights if a checkpoint path is given
if os.path.exists(args.weights):
weights_dict = torch.load(args.weights, map_location=device)
load_weights_dict = {k: v for k, v in weights_dict.items()
if model.state_dict()[k].numel() == v.numel()}
print(model.load_state_dict(load_weights_dict, strict=False))
pg = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_val_acc = 0.0
    best_model = None
for epoch in range(1000):
# train
mean_loss = train_one_epoch(model=model,
optimizer=optimizer,
data_loader=train_loader,
device=device,
epoch=epoch)
scheduler.step()
# validate
        val_acc, val_loss = evaluate(model=model,
data_loader=val_loader,
device=device)
tags = ["loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], mean_loss, epoch)
tb_writer.add_scalar(tags[1], val_acc, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
if val_acc > best_val_acc:
best_model = model
            best_val_acc = val_acc
            torch.save(best_model.state_dict(), "./weights/{}/bestmodel-{}.pth".format(num_model, epoch))
        print("[epoch {}] val_acc: {} best_acc: {}".format(epoch, round(val_acc, 3), round(best_val_acc, 3)))
early_stopping(val_loss, val_acc, model)
if early_stopping.early_stop:
print("epoch = {}".format(epoch))
break
# torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch))
# test
    test_acc, test_loss = evaluate(model=best_model,
data_loader=test_loader,
device=device)
print("best test accuracy: {}".format(round(test_acc, 3)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--num_classes', type=int, default=5)
parser.add_argument('--epochs', type=int, default=150)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--lrf', type=float, default=0.01)
parser.add_argument('--weights', type=str, default='',
help='initial weights path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt) | 7,607 | 43.232558 | 147 | py |
HIWL | HIWL-main/noscheme/train_vit.py | import os
import math
import argparse
import sys
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from my_dataset import MyDataSet
from model_vit import vit_base_patch16_224_in21k as create_model
from utils import read_split_data, train_one_epoch, evaluate
from pytorchtools import EarlyStopping
class Logger(object):
def __init__(self, filename='./log/vit', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'w')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
def main(args):
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
if os.path.exists("./weights/vit") is False:
os.makedirs("./weights/vit")
if os.path.exists("./log") is False:
os.makedirs("./log")
sys.stdout = Logger(stream=sys.stdout)
tb_writer = SummaryWriter()
    # labels 0,1,2,3,4 correspond to in-between, cigar-shaped, edge-on, round and spiral galaxies, respectively
train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path)
data_transform = {
"train": transforms.Compose([transforms.CenterCrop(256),
transforms.RandomRotation((-25, 25)),
transforms.RandomResizedCrop(224, scale=(0.9, 1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
]),
"test": transforms.Compose([transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065])
])}
    # instantiate the training dataset
train_data_set = MyDataSet(images_path=train_images_path,
images_class=train_images_label,
transform=data_transform["train"])
    # instantiate the validation dataset
val_data_set = MyDataSet(images_path=val_images_path,
images_class=val_images_label,
transform=data_transform["test"])
    # instantiate the test dataset
test_data_set = MyDataSet(images_path=test_images_path,
images_class=test_images_label,
transform=data_transform["test"])
batch_size = args.batch_size
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
if args.os == 'windows':
nw = 0
    print('Using {} dataloader workers per process'.format(nw))
train_loader = torch.utils.data.DataLoader(train_data_set,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=nw,
collate_fn=train_data_set.collate_fn)
val_loader = torch.utils.data.DataLoader(val_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
collate_fn=val_data_set.collate_fn)
test_loader = torch.utils.data.DataLoader(test_data_set,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=nw,
                                              collate_fn=test_data_set.collate_fn)
    patience = 25  # early-stop if the validation metric does not improve for 25 epochs
    early_stopping = EarlyStopping(patience, verbose=True)
    model = create_model(num_classes=args.num_classes).to(device)
    # load pre-trained weights if a checkpoint path is given
if os.path.exists(args.weights):
weights_dict = torch.load(args.weights, map_location=device)
load_weights_dict = {k: v for k, v in weights_dict.items()
if model.state_dict()[k].numel() == v.numel()}
print(model.load_state_dict(load_weights_dict, strict=False))
pg = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_val_acc = 0.0
    best_model = None
for epoch in range(1000):
# train
mean_loss = train_one_epoch(model=model,
optimizer=optimizer,
data_loader=train_loader,
device=device,
epoch=epoch)
scheduler.step()
# validate
        val_acc, val_loss = evaluate(model=model,
data_loader=val_loader,
device=device)
tags = ["loss", "accuracy", "learning_rate"]
tb_writer.add_scalar(tags[0], mean_loss, epoch)
tb_writer.add_scalar(tags[1], val_acc, epoch)
tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)
if val_acc > best_val_acc:
best_model = model
            best_val_acc = val_acc
            torch.save(best_model.state_dict(), "./weights/vit/bestmodel-{}.pth".format(epoch))
        print("[epoch {}] val_acc: {} best_acc: {}".format(epoch, round(val_acc, 3), round(best_val_acc, 3)))
early_stopping(val_loss, val_acc, model)
if early_stopping.early_stop:
print("epoch = {}".format(epoch))
break
# torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch))
# test
    test_acc, test_loss = evaluate(model=best_model,
data_loader=test_loader,
device=device)
print("best test accuracy: {}".format(round(test_acc, 3)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str,
default=r"F:\dataSet\clean gzdata")
parser.add_argument('--num_classes', type=int, default=5)
parser.add_argument('--epochs', type=int, default=150)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--lrf', type=float, default=0.01)
parser.add_argument('--weights', type=str, default='',
help='initial weights path')
parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--os', default='windows', help='windows or linux')
opt = parser.parse_args()
main(opt)
| 7,325 | 42.868263 | 147 | py |
HIWL | HIWL-main/noscheme/my_dataset.py | from PIL import Image
import torch
from torch.utils.data import Dataset
class MyDataSet(Dataset):
"""自定义数据集"""
def __init__(self, images_path: list, images_class: list, transform=None):
self.images_path = images_path
self.images_class = images_class
self.transform = transform
def __len__(self):
return len(self.images_path)
def __getitem__(self, item):
img = Image.open(self.images_path[item])
        # PIL mode 'RGB' is a color image, 'L' is grayscale
if img.mode != 'RGB':
raise ValueError("image: {} isn't RGB mode.".format(self.images_path[item]))
label = self.images_class[item]
if self.transform is not None:
img = self.transform(img)
return img, label
@staticmethod
def collate_fn(batch):
images, labels = tuple(zip(*batch))
images = torch.stack(images, dim=0)
labels = torch.as_tensor(labels)
return images, labels
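# Minimal usage sketch (paths and labels below are placeholders, not from the
# original repo):
#   ds = MyDataSet(images_path=['a.jpg', 'b.jpg'], images_class=[0, 1])
#   loader = torch.utils.data.DataLoader(ds, batch_size=2,
#                                        collate_fn=MyDataSet.collate_fn)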
| 952 | 25.472222 | 88 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/fusion.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import os, random, math
import time
import glob
import numpy as np
import shutil
import torch
import logging
import argparse
import traceback
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import sys
sys.path.append(os.path.abspath(os.path.join("..", os.getcwd())))
from config import Config
from lib import *
import torch.distributed as dist
from utils import *
from utils.build import *
from lib.model.DSN_v2 import DSNNetV2
parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', add_help=False)
parser.add_argument('--data', type=str, default='/path/to/NTU-RGBD/dataset/', help='data dir')
parser.add_argument('--splits', type=str, default='/path/to/NTU-RGBD/dataset/dataset_splits/@CS', help='data dir')
parser.add_argument('--batch-size', default=16, type=int)
parser.add_argument('--test-batch-size', default=32, type=int)
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--config', help='Load Congfile.')
parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
parser.add_argument('--local_rank', type=int, default=0)
# parser.add_argument('--nprocs', type=int, default=1)
parser.add_argument('--type', default='M',
help='data types, e.g., "M" or "K"')
parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
parser.add_argument('--save_output', action='store_true', help='Save logits?')
parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
parser.add_argument('--resume', default='', help='resume from checkpoint')
# * Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment dir')
parser.add_argument('--seed', type=int, default=123, help='random seed')
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--shuffle', default=False, action='store_true', help='Tokens shuffle')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=5., metavar='NORM',
                    help='Clip gradient norm (default: 5.)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0005,
help='weight decay (default: 0.0005)')
parser.add_argument('--ACCUMULATION-STEPS', type=int, default=0,
help='accumulation step (default: 0.0)')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=0.0,
                    help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--mixup-dynamic', action='store_true', default=False, help='')
parser.add_argument('--model-ema', default=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Augmentation parameters
parser.add_argument('--autoaug', action='store_true')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". '
                         '(default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# Vision Transformer
parser.add_argument('--model', type=str, default='deit_tiny_patch16_224')
# * ShuffleMix params
parser.add_argument('--shufflemix', type=float, default=0.2,
                    help='shufflemix alpha, shufflemix enabled if > 0. (default: 0.2)')
parser.add_argument('--smixmode', type=str, default='sm',
help='ShuffleMix strategies (default: "shufflemix(sm)", Per "sm_v1", "sm_v2", or "sm_v3", "mu_sm")')
parser.add_argument('--smprob', type=float, default=0.3, metavar='ShuffleMix Prob',
help='ShuffleMix enable prob (default: 0.3)')
parser.add_argument('--temporal-consist', action='store_true')
parser.add_argument('--tempMix', action='store_true')
parser.add_argument('--MixIntra', action='store_true')
parser.add_argument('--replace-prob', type=float, default=0.25, metavar='MixIntra replace Prob')
# DTN example sampling params
parser.add_argument('--sample-duration', type=int, default=16,
help='The sampled frames in a video.')
parser.add_argument('--intar-fatcer', type=int, default=2,
                    help='Interval factor used when sampling frames from a video (default: 2).')
parser.add_argument('--sample-window', type=int, default=1,
help='Range of frames sampling (default: 1)')
parser.add_argument('--translate', type=int, default=0,
help='translate angle (default: 0)')
# * Recoupling params
parser.add_argument('--distill', type=float, default=0.3, metavar='distill param',
                    help='distillation loss coefficient (default: 0.3)')
parser.add_argument('--temper', type=float, default=0.6, metavar='distillation temperature')
# * Cross modality loss params
parser.add_argument('--DC-weight', type=float, default=0.5, metavar='cross depth loss weight')
# * Rank Pooling params
parser.add_argument('--frp-num', type=int, default=0, metavar='The Number of Epochs.')
parser.add_argument('--w', type=int, default=4, metavar='The slide window of FRP.')
# * fp16 params
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--FusionNet', default=False)
args = parser.parse_args()
args = Config(args)
class FusionModule(nn.Module):
def __init__(self, args, fusion='add'):
super(FusionModule, self).__init__()
self.fusion = fusion
self.args = args
build_model(args)
self.rgb = DSNNetV2(args, num_classes=args.num_classes, pretrained=args.pretrained)
self.depth = DSNNetV2(args, num_classes=args.num_classes, pretrained=args.pretrained)
rgb_checkpoint = args.rgb_checkpoint[args.FusionNet]
self.strat_epoch_r, self.best_acc_r = load_checkpoint(self.rgb, rgb_checkpoint)
print(f'Best acc RGB: {self.best_acc_r}')
depth_checkpoint = args.depth_checkpoint[args.FusionNet]
self.strat_epoch_d, self.best_acc_d = load_checkpoint(self.depth, depth_checkpoint)
print(f'Best acc depth: {self.best_acc_d}')
def forward(self, r, d):
self.args.epoch = self.strat_epoch_r - 1
(r_x, r_xs, r_xm, r_xl), _ = self.rgb(r)
self.args.epoch = self.strat_epoch_d - 1
(d_x, xs, xm, xl), _ = self.depth(d)
distance = F.pairwise_distance(r_x, d_x, p=2)
if self.fusion == 'add':
x = (r_x + d_x) / 2.
else:
x = r_x * d_x
return x, distance
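# Average a scalar tensor across all distributed workers: all-reduce the sum,
# then divide by the number of processes.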
def reduce_mean(tensor, nprocs):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= nprocs
return rt.item()
def main(args):
utils.init_distributed_mode(args)
print(args)
seed = args.seed + utils.get_rank()
np.random.seed(seed)
cudnn.benchmark = True
torch.manual_seed(seed)
cudnn.enabled = True
torch.cuda.manual_seed(seed)
local_rank = utils.get_rank()
args.nprocs = utils.get_world_size()
print('nprocs:', args.nprocs)
device = torch.device(args.device)
#----------------------------
# build function
#----------------------------
model = FusionModule(args)
model = model.to(device)
valid_queue, valid_sampler = build_dataset(args, phase='valid')
criterion = build_loss(args)
if args.SYNC_BN and args.nprocs > 1:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
print("param size = %fMB"%utils.count_parameters_in_MB(model))
valid_dict = infer(valid_queue, model, criterion, local_rank, device)
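# Evaluation loop: forward hooks capture the outputs of the three multi-scale
# transformer stages in each modality branch, and the mean L2 distance between
# the summed RGB and depth class tokens is logged alongside accuracy.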
@torch.no_grad()
def infer(valid_queue, model, criterion, local_rank, device, epoch=0):
model.eval()
meter_dict = dict(
CE_loss=AverageMeter(),
)
meter_dict.update(dict(
Acc=AverageMeter(),
Acc_top5=AverageMeter(),
))
meter_dict['distance'] = AverageMeter()
meter_dict['Infer_Time'] = AverageMeter()
CE = torch.nn.CrossEntropyLoss()
MSE = torch.nn.MSELoss()
grounds, preds, v_paths = [], [], []
output = {}
for step, (inputs, heatmap, target, v_path) in enumerate(valid_queue):
color, depth = inputs
color, depth, target = map(lambda x: x.to(device, non_blocking=True), [color, depth, target])
features = []
def hook(module, input, output):
features.append(output.clone().detach())
        # Register hooks on the three multi-scale transformer stages of each
        # modality branch; keep every handle so the hooks can be detached after
        # the forward pass (reassigning a single `handle` would leak them).
        handles = [
            branch.dtn.multi_scale_transformers[i][2].register_forward_hook(hook)
            for branch in (model.module.rgb, model.module.depth)
            for i in range(3)
        ]
n = target.size(0)
end = time.time()
        output, distance = model(color, depth)
        # Detach the hooks so they are not registered again on the next batch.
        for h in handles:
            h.remove()
distance = F.pairwise_distance(features[0][:, 0]+features[1][:, 0]+features[2][:, 0], features[3][:, 0]+features[4][:, 0]+features[5][:, 0], p=2).mean()
globals()['CE_loss'] = CE(output, target)
globals()['distance'] = distance.mean()
meter_dict['Infer_Time'].update((time.time() - end) / n)
grounds += target.cpu().tolist()
preds += torch.argmax(output, dim=1).cpu().tolist()
v_paths += v_path
torch.distributed.barrier()
globals()['Acc'], globals()['Acc_top5'] = accuracy(output, target, topk=(1, 5))
for name in meter_dict:
if 'Time' not in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if step % args.report_freq == 0 and local_rank == 0:
log_info = {
'Epoch': epoch + 1,
'Mini-Batch': '{:0>4d}/{:0>4d}'.format(step + 1, len(valid_queue.dataset) // (
args.test_batch_size * args.nprocs)),
}
log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
print_func(log_info)
torch.distributed.barrier()
grounds_gather = concat_all_gather(torch.tensor(grounds).to(device))
preds_gather = concat_all_gather(torch.tensor(preds).to(device))
grounds_gather, preds_gather = list(map(lambda x: x.cpu().numpy(), [grounds_gather, preds_gather]))
print(dict([(name, meter_dict[name].avg) for name in meter_dict]))
return meter_dict
if __name__ == '__main__':
try:
main(args)
except KeyboardInterrupt:
torch.cuda.empty_cache() | 16,382 | 45.279661 | 160 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/train.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import time
import glob
import numpy as np
import shutil
import cv2
import os, random, math
import sys
# sys.path.append(os.path.join('..', os.path.abspath(os.path.join(os.getcwd()))) )
from pathlib import Path
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
from timm.optim import create_optimizer
import torch
import utils
import logging
import argparse
import traceback
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from collections import OrderedDict
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
from utils.visualizer import Visualizer
from config import Config
from lib import *
from utils import *
from timm.utils import get_state_dict #, ModelEma, ModelEmaV2
#------------------------
# evaluation metrics
#------------------------
from sklearn.decomposition import PCA
from sklearn import manifold
import pandas as pd
import matplotlib.pyplot as plt # For graphics
import seaborn as sns
from torchvision.utils import save_image, make_grid
from PIL import Image
from einops import rearrange, repeat
def get_args_parser():
parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', add_help=False)
parser.add_argument('--data', type=str, default='/path/to/NTU-RGBD/dataset/', help='data dir')
parser.add_argument('--splits', type=str, default='/path/to/NTU-RGBD/dataset/dataset_splits/@CS', help='data dir')
parser.add_argument('--num-classes', default=None)
parser.add_argument('--batch-size', default=16, type=int)
parser.add_argument('--test-batch-size', default=32, type=int)
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
    parser.add_argument('--config', help='Load config file.')
parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
parser.add_argument('--local_rank', type=int, default=0)
# parser.add_argument('--nprocs', type=int, default=1)
parser.add_argument('--type', default='M',
help='data types, e.g., "M" or "K"')
parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
parser.add_argument('--save_output', action='store_true', help='Save logits?')
parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
parser.add_argument('--resume', default='', help='resume from checkpoint')
# * Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment dir')
parser.add_argument('--seed', type=int, default=123, help='random seed')
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--shuffle', default=False, action='store_true', help='Tokens shuffle')
# Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine"; also "step", "multistep")')
    parser.add_argument('--lr', type=float, default=1e-2, metavar='LR',
                        help='learning rate (default: 1e-2)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
    parser.add_argument('--decay-milestones', type=int, nargs='+', default=[10, 20, 30], metavar='MILESTONES',
                        help='epochs at which the multistep scheduler decays the LR (default: [10, 20, 30])')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Optimizer parameters
    parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=5., metavar='NORM',
                        help='Clip gradient norm (default: 5.0)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0005,
help='weight decay (default: 0.0005)')
    parser.add_argument('--ACCUMULATION-STEPS', type=int, default=0,
                        help='gradient accumulation steps; accumulation is disabled when <= 1 (default: 0)')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=0.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 0.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--mixup-dynamic', action='store_true', default=False,
                        help='dynamically adjust the mixup strength over training')
    parser.add_argument('--model-ema', action='store_true',
                        help='keep an exponential moving average (EMA) of model weights')
    parser.add_argument('--model-ema-decay', type=float, default=0.99996,
                        help='decay factor for the model EMA (default: 0.99996)')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
                        help='keep the EMA weights on CPU')
# Augmentation parameters
parser.add_argument('--autoaug', action='store_true')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation: "random", "bilinear", or "bicubic" (default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
parser.add_argument('--translate', type=int, default=20,
help='translate angle (default: 0)')
parser.add_argument('--strong-aug', action='store_true',
help='Strong Augmentation (default: False)')
parser.add_argument('--resize-rate', type=float, default=0.1,
help='random resize rate (default: 0.1)')
# * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.0, metavar='PCT',
                        help='Random erase prob (default: 0.0)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * ShuffleMix params
    parser.add_argument('--shufflemix', type=float, default=0.2,
                        help='shufflemix alpha, shufflemix enabled if > 0. (default: 0.2)')
parser.add_argument('--smixmode', type=str, default='sm',
help='ShuffleMix strategies (default: "shufflemix(sm)", Per "sm_v1", "sm_v2", or "sm_v3", "mu_sm")')
parser.add_argument('--smprob', type=float, default=0.3, metavar='ShuffleMix Prob',
help='ShuffleMix enable prob (default: 0.3)')
parser.add_argument('--temporal-consist', action='store_true')
parser.add_argument('--tempMix', action='store_true')
parser.add_argument('--MixIntra', action='store_true')
parser.add_argument('--replace-prob', type=float, default=0.25, metavar='MixIntra replace Prob')
# DTN example sampling params
parser.add_argument('--sample-duration', type=int, default=16,
help='The sampled frames in a video.')
    parser.add_argument('--intar-fatcer', type=int, default=2,
                        help='Interval factor used when sampling frames from a video (default: 2).')
parser.add_argument('--sample-window', type=int, default=1,
help='Range of frames sampling (default: 1)')
# * Recoupling params
    parser.add_argument('--distill', type=float, default=0.3, metavar='distill param',
                        help='distillation loss coefficient (default: 0.3)')
parser.add_argument('--temper', type=float, default=0.6, metavar='distillation temperature')
# * Cross modality loss params
parser.add_argument('--DC-weight', type=float, default=0.2, metavar='cross depth loss weight')
# * Rank Pooling params
parser.add_argument('--frp-num', type=int, default=0, metavar='The Number of Epochs.')
parser.add_argument('--w', type=int, default=4, metavar='The slide window of FRP.')
# * fp16 params
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
return parser
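# Average a scalar tensor across all distributed workers (used when logging metrics).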
def reduce_mean(tensor, nprocs):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= nprocs
return rt.item()
def main(args):
utils.init_distributed_mode(args)
print(args)
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
if args.amp_opt_level == 'O0':
logging.info('no apex is used')
seed = args.seed + utils.get_rank()
np.random.seed(seed)
cudnn.benchmark = True
torch.manual_seed(seed)
cudnn.enabled = True
torch.cuda.manual_seed(seed)
local_rank = utils.get_rank()
args.nprocs = utils.get_world_size()
print('nprocs:', args.nprocs)
device = torch.device(args.device)
#----------------------------
# build function
#----------------------------
model = build_model(args)
model = model.to(device)
train_queue, train_sampler = build_dataset(args, phase='train')
valid_queue, valid_sampler = build_dataset(args, phase='valid')
if args.SYNC_BN and args.nprocs > 1:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes, args=args)
else:
args.loss['name'] = 'CE'
optimizer = create_optimizer(args, model_without_ddp)
if args.fp16:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
criterion = build_loss(args)
loss_scaler = NativeScaler()
scheduler, _ = create_scheduler(args, optimizer)
if args.finetune:
load_pretrained_checkpoint(model_without_ddp, args.finetune)
if args.resume:
strat_epoch, best_acc = load_checkpoint(model_without_ddp, args.resume, optimizer, scheduler)
print("Start Epoch: {}, Learning rate: {}, Best accuracy: {}".format(strat_epoch, [g['lr'] for g in
optimizer.param_groups],
round(best_acc, 4)))
scheduler.step(strat_epoch - 1)
if args.resumelr:
for g in optimizer.param_groups:
args.resumelr = g['lr'] if not isinstance(args.resumelr, float) else args.resumelr
g['lr'] = args.resumelr
#resume_scheduler = np.linspace(args.resumelr, 1e-5, args.epochs - strat_epoch)
resume_scheduler = cosine_scheduler(args.resumelr, 1e-5, args.epochs - strat_epoch + 1, niter_per_ep=1).tolist()
resume_scheduler.pop(0)
args.epoch = strat_epoch - 1
else:
strat_epoch = 0
best_acc = 0.0
args.epoch = strat_epoch
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model_without_ddp,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume=args.finetune
)
if local_rank == 0:
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
logging.info("learnable param size = %fMB", utils.count_learnable_parameters_in_MB(model))
if hasattr(model_without_ddp, 'flops'):
flops = model_without_ddp.flops()
logging.info(f"number of GFLOPs: {flops / 1e9}")
train_results = dict(
train_score=[],
train_loss=[],
valid_score=[],
valid_loss=[],
best_score=0.0
)
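    # Run one validation pass before training starts to sanity-check the restored
    # weights and to dump a per-class confusion matrix.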
first_test = True
if first_test:
args.distill_lamdb = args.distill
valid_acc, _, valid_dict, meter_dict, output = infer(valid_queue, model, criterion, local_rank, strat_epoch, device)
from sklearn.metrics import confusion_matrix, auc, roc_curve, roc_auc_score
num_cat = []
categories = np.unique(valid_dict['grounds'])
cm = confusion_matrix(valid_dict['grounds'], valid_dict['preds'], labels=categories)
fig = plt.figure()
ax = fig.add_subplot()
sns.heatmap(cm, annot=True, fmt='g', ax=ax)
# labels, title and ticks
ax.set_title('Confusion Matrix', fontsize=20)
ax.set_xlabel('Predicted labels', fontsize=16)
ax.set_ylabel('True labels', fontsize=16)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
fig.savefig(os.path.join(args.save, "confusion_matrix"), dpi=fig.dpi)
Accuracy = [(cm[i, i] / sum(cm[i, :])) * 100 if sum(cm[i, :]) != 0 else 0.000001 for i in range(cm.shape[0])]
Precision = [(cm[i, i] / sum(cm[:, i])) * 100 if sum(cm[:, i]) != 0 else 0.000001 for i in range(cm.shape[1])]
print('| Class ID \t Accuracy(%) \t Precision(%) |')
for i in range(len(Accuracy)):
print('| {0} \t {1} \t {2} |'.format(i, round(Accuracy[i], 2), round(Precision[i], 2)))
print('-' * 80)
if args.save_output:
torch.save(output, os.path.join(args.save, '{}-output.pth'.format(args.type)))
if args.eval_only:
return
for epoch in range(strat_epoch, args.epochs):
train_sampler.set_epoch(epoch)
model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
if epoch <= args.warmup_epochs:
args.distill_lamdb = 0.
else:
args.distill_lamdb = args.distill
# Warm-Up with FRP
if epoch < args.frp_num:
args.frp = True
else:
args.frp = False
args.epoch = epoch
train_acc, train_obj, meter_dict_train = train_one_epoch(train_queue, model, model_ema, criterion, optimizer, epoch, local_rank, loss_scaler, device, mixup_fn)
valid_acc, valid_obj, valid_dict, meter_dict_val, output = infer(valid_queue, model, criterion, local_rank, epoch, device)
scheduler.step(epoch)
if local_rank == 0:
if valid_acc > best_acc:
best_acc = valid_acc
isbest = True
else:
isbest = False
# logging.info(f'train_acc {round(train_acc, 4)}, top-5 {round(meter_dict_train["Acc_top5"].avg, 4)}, train_loss {round(train_obj, 4)}')
logging.info(f'valid_acc {round(valid_acc, 4)}, best_acc {round(best_acc, 4)}')
state = {'model': model.module.state_dict(),'optimizer': optimizer.state_dict(),
'epoch': epoch + 1, 'bestacc': best_acc,
'scheduler': scheduler.state_dict(),
'scaler': loss_scaler.state_dict(),
'args': args,
'model_ema': get_state_dict(model_ema) if model_ema is not None else None,
}
save_checkpoint(state, isbest, args.save)
train_results['train_score'].append(train_acc)
train_results['train_loss'].append(train_obj)
train_results['valid_score'].append(valid_acc)
train_results['valid_loss'].append(valid_obj)
train_results['best_score'] = best_acc
train_results.update(valid_dict)
train_results['categories'] = np.unique(valid_dict['grounds'])
if args.visdom['enable']:
vis.plot_many({'train_acc': train_acc, 'loss': train_obj,
'cosin_similar': meter_dict_train['cosin_similar'].avg}, 'Train-' + args.type, epoch)
vis.plot_many({'valid_acc': valid_acc, 'loss': valid_obj,
'cosin_similar': meter_dict_val['cosin_similar'].avg}, 'Valid-' + args.type, epoch)
if isbest:
if args.save_output:
torch.save(output, os.path.join(args.save, '{}-output.pth'.format(args.type)))
EvaluateMetric(PREDICTIONS_PATH=args.save, train_results=train_results, idx=epoch)
for k, v in train_results.items():
if isinstance(v, list):
v.clear()
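#------------------------
# training / evaluation loops
#------------------------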
def train_one_epoch(train_queue, model, model_ema, criterion, optimizer, epoch, local_rank, loss_scaler, device,
mixup_fn=None
):
model.train()
meter_dict = dict(
Total_loss=AverageMeter(),
CE_loss=AverageMeter(),
)
meter_dict['Data_Time'] = AverageMeter()
meter_dict.update(dict(
Acc_s=AverageMeter(),
Acc_m=AverageMeter(),
Acc_l=AverageMeter(),
Acc=AverageMeter(),
Acc_top5=AverageMeter(),
))
if args.distill:
meter_dict['Distil_loss'] = AverageMeter()
if args.model_ema:
meter_dict['DC_loss'] = AverageMeter()
end = time.time()
CE = torch.nn.CrossEntropyLoss()
MSE = torch.nn.MSELoss()
rcm_loss = RCM_loss(args, model.module)
for step, (inputs, heatmap, target, _) in enumerate(train_queue):
if args.model_ema:
inputs, inputs_aux = inputs
heatmap, heatmap_aux = heatmap
inputs_aux, heatmap_aux = map(lambda x: x.to(device, non_blocking=True), [inputs_aux, heatmap_aux])
meter_dict['Data_Time'].update((time.time() - end)/args.batch_size)
inputs, target, heatmap = map(lambda x: x.to(device, non_blocking=True), [inputs, target, heatmap])
if args.frp:
inputs = heatmap
ori_target, target_aux = target, target
if mixup_fn is not None:
inputs, target = mixup_fn(inputs, target)
if args.model_ema:
inputs_aux, target_aux = mixup_fn(inputs_aux, ori_target)
if args.model_ema:
with torch.no_grad():
(logit_aux, dxs, dxm, dxl), _, = model_ema(inputs_aux)
pseduo_targets = torch.argmax(logit_aux, dim=-1)
images = inputs
(logits, xs, xm, xl), temp_out = model(inputs)
Total_loss = 0.0
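        # Multi-scale supervision: when enabled, weight the fused logits and the
        # small/medium/large branch outputs by the coefficients in args.loss_lamdb.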
if args.MultiLoss:
lamd1, lamd2, lamd3, lamd4 = map(float, args.loss_lamdb)
globals()['CE_loss'] = lamd1*criterion(logits, target) + lamd2*criterion(xs, target) + lamd3*criterion(xm, target) + lamd4*criterion(xl, target)
else:
globals()['CE_loss'] = criterion(logits, target)
Total_loss += CE_loss
if args.distill:
globals()['Distil_loss'] = rcm_loss(temp_out) * args.distill_lamdb
Total_loss += Distil_loss
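        # Cross-modality consistency: pseudo-labels predicted by the EMA model on the
        # auxiliary modality supervise the current network through an extra CE term.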
if args.model_ema:
globals()['DC_loss'] = args.DC_weight * CE(logits, pseduo_targets)
Total_loss += DC_loss
        if args.ACCUMULATION_STEPS > 1:
            # Scale the loss before backward so gradients accumulate to the true mean.
            Total_loss = Total_loss / args.ACCUMULATION_STEPS
            globals()['Total_loss'] = Total_loss
            Total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
if (step + 1) % args.ACCUMULATION_STEPS == 0:
optimizer.step()
optimizer.zero_grad()
else:
globals()['Total_loss'] = Total_loss
optimizer.zero_grad()
if args.fp16:
with amp.scale_loss(Total_loss, optimizer) as scaled_loss:
scaled_loss.backward()
grad_norm = nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.clip_grad)
else:
Total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
#---------------------
# Meter performance
#---------------------
torch.distributed.barrier()
globals()['Acc'], globals()['Acc_top5'] = accuracy(logits, ori_target, topk=(1, 5))
globals()['Acc_s'], _ = accuracy(xs, ori_target, topk=(1, 5))
globals()['Acc_m'], _ = accuracy(xm, ori_target, topk=(1, 5))
globals()['Acc_l'], _ = accuracy(xl, ori_target, topk=(1, 5))
for name in meter_dict:
if 'loss' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if 'Acc' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if step % args.report_freq == 0 and local_rank == 0:
log_info = {
'Epoch': '{}/{}'.format(epoch + 1, args.epochs),
'Mini-Batch': '{:0>5d}/{:0>5d}'.format(step + 1,
len(train_queue.dataset) // (args.batch_size * args.nprocs)),
'Lr': optimizer.param_groups[0]["lr"],
}
log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
print_func(log_info)
if args.vis_feature:
Visfeature(args, model.module, images, weight_softmax=torch.softmax(logits, dim=-1))
end = time.time()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
if local_rank == 0:
print('*'*20)
print_func(dict([(name, meter_dict[name].avg) for name in meter_dict]))
print('*'*20)
return meter_dict['Acc'].avg, meter_dict['Total_loss'].avg, meter_dict
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
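    Note: every rank must contribute a tensor of the same shape.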
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
@torch.no_grad()
def infer(valid_queue, model, criterion, local_rank, epoch, device, obtain_softmax_score=True):
model.eval()
meter_dict = dict(
Total_loss=AverageMeter(),
)
meter_dict.update(dict(
Acc_sm=AverageMeter(),
Acc_sl=AverageMeter(),
Acc_lm=AverageMeter(),
Acc_all=AverageMeter(),
Acc_adaptive=AverageMeter(),
Acc_adaptive_top5=AverageMeter(),
))
meter_dict['Infer_Time'] = AverageMeter()
CE = torch.nn.CrossEntropyLoss()
MSE = torch.nn.MSELoss()
grounds, preds, v_paths = [], {0:[], 1:[], 2:[], 3:[], 4:[]}, []
logits_out = {}
softmax_score = {}
embedding_dict = OrderedDict()
for step, (inputs, heatmap, target, v_path) in enumerate(valid_queue):
if args.model_ema:
inputs, inputs_aux = inputs
heatmap, heatmap_aux = heatmap
inputs_aux, heatmap_aux = map(lambda x: x.to(device, non_blocking=True), [inputs_aux, heatmap_aux])
n = inputs.size(0)
end = time.time()
inputs, target, heatmap = map(lambda x: x.to(device, non_blocking=True), [inputs, target, heatmap])
if args.frp:
inputs = heatmap
images = inputs
(logits, xs, xm, xl), temp_out = model(inputs)
Total_loss = 0
if args.MultiLoss:
lamd1, lamd2, lamd3, lamd4 = map(float, args.loss_lamdb)
            globals()['CE_loss'] = (lamd1 * CE(logits, target) + lamd2 * CE(xs, target)
                                    + lamd3 * CE(xm, target) + lamd4 * CE(xl, target))
else:
globals()['CE_loss'] = CE(logits, target)
Total_loss += CE_loss
globals()['Total_loss'] = Total_loss
meter_dict['Infer_Time'].update((time.time() - end) / n)
grounds += target.cpu().tolist()
        # collect predictions from several score-fusion variants of the multi-scale outputs
preds[0] += torch.argmax(logits, dim=1).cpu().tolist()
preds[1] += torch.argmax(xs+xm+xl, dim=1).cpu().tolist()
preds[2] += torch.argmax(xs+xm, dim=1).cpu().tolist()
preds[3] += torch.argmax(xs+xl, dim=1).cpu().tolist()
preds[4] += torch.argmax(xl+xm, dim=1).cpu().tolist()
v_paths += v_path
torch.distributed.barrier()
globals()['Acc_adaptive'], globals()['Acc_adaptive_top5'] = accuracy(logits, target, topk=(1, 5))
globals()['Acc_all'], _ = accuracy(xs+xm+xl, target, topk=(1, 5))
globals()['Acc_sm'], _ = accuracy(xs+xm, target, topk=(1, 5))
globals()['Acc_sl'], _ = accuracy(xs+xl, target, topk=(1, 5))
globals()['Acc_lm'], _ = accuracy(xl+xm, target, topk=(1, 5))
for name in meter_dict:
if 'loss' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if 'Acc' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if step % args.report_freq == 0 and local_rank == 0:
log_info = {
'Epoch': epoch + 1,
'Mini-Batch': '{:0>4d}/{:0>4d}'.format(step + 1, len(valid_queue.dataset) // (
args.test_batch_size * args.nprocs)),
}
log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
print_func(log_info)
if args.vis_feature:
Visfeature(args, model.module, images, v_path, torch.softmax(logits, dim=-1))
if args.save_output:
feature_embedding(temp_out, v_path, embedding_dict)
for t, logit in zip(v_path, logits):
logits_out[t] = logit
if obtain_softmax_score and args.eval_only:
for t, logit in zip(target.cpu().tolist(), logits):
if t not in softmax_score:
softmax_score[t] = [torch.softmax(logit, dim=-1).max(-1)[0]]
else:
softmax_score[t].append(torch.softmax(logit, dim=-1).max(-1)[0])
# select best acc output
acc_list = torch.tensor([meter_dict['Acc_adaptive'].avg, meter_dict['Acc_all'].avg, meter_dict['Acc_sm'].avg, meter_dict['Acc_sl'].avg, meter_dict['Acc_lm'].avg])
best_idx = torch.argmax(acc_list).tolist()
preds = preds[best_idx] # Note: only preds be refined
if obtain_softmax_score and args.eval_only:
softmax_score = dict(sorted(softmax_score.items(), key = lambda i: i[0]))
print('\n', 'The confidence scores for categories: ')
print('| Class ID \t softmax score |')
for k, v in softmax_score.items():
print('| {0} \t {1} |'.format(k, round(float(sum(v)/len(v)), 2)))
print('-' * 80)
grounds_gather = concat_all_gather(torch.tensor(grounds).to(device))
preds_gather = concat_all_gather(torch.tensor(preds).to(device))
grounds_gather, preds_gather = list(map(lambda x: x.cpu().numpy(), [grounds_gather, preds_gather]))
if local_rank == 0:
print('*'*20)
print_func(dict([(name, meter_dict[name].avg) for name in meter_dict]))
print('*'*20)
v_paths = np.array(v_paths)
grounds = np.array(grounds)
preds = np.array(preds)
wrong_idx = np.where(grounds != preds)
v_paths = v_paths[wrong_idx[0]]
grounds = grounds[wrong_idx[0]]
preds = preds[wrong_idx[0]]
if epoch % 5 == 0 and args.save_output:
torch.save(embedding_dict, os.path.join(args.save, 'feature-{}-epoch{}.pth'.format(args.type, epoch)))
return acc_list.tolist()[best_idx], meter_dict['Total_loss'].avg, dict(grounds=grounds_gather, preds=preds_gather, valid_images=(v_paths, grounds, preds)), meter_dict, logits_out
if __name__ == '__main__':
# import os
# args.local_rank=os.environ['LOCAL_RANK']
parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
args = Config(args)
if args.save and args.local_rank == 0:
Path(args.save).mkdir(parents=True, exist_ok=True)
try:
if args.resume:
args.save = os.path.split(args.resume)[0]
else:
args.save = f'{args.save}'
utils.create_exp_dir(args.save, scripts_to_save=[args.config] + glob.glob('./train.py') + glob.glob('lib/model/*.py'))
    except Exception:
        pass
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log{}.txt'.format(time.strftime("%Y%m%d-%H%M%S"))))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
main(args) | 33,323 | 44.154472 | 182 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/train_fusion.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import time
import glob
import numpy as np
import shutil
import cv2
import os, random, math
import sys
from pathlib import Path
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
from timm.optim import create_optimizer
import torch
import utils
import logging
import argparse
import traceback
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from collections import OrderedDict
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
from utils.visualizer import Visualizer
from config import Config
from lib import *
from utils import *
from timm.utils import get_state_dict #, ModelEma, ModelEmaV2
#------------------------
# evaluation metrics
#------------------------
from sklearn.decomposition import PCA
from sklearn import manifold
import pandas as pd
import matplotlib.pyplot as plt # For graphics
import seaborn as sns
from torchvision.utils import save_image, make_grid
from PIL import Image
from einops import rearrange, repeat
def get_args_parser():
parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', add_help=False)
parser.add_argument('--data', type=str, default='/path/to/NTU-RGBD/dataset/', help='data dir')
parser.add_argument('--splits', type=str, default='/path/to/NTU-RGBD/dataset/dataset_splits/@CS', help='data dir')
parser.add_argument('--num-classes', default=None)
parser.add_argument('--batch-size', default=16, type=int)
parser.add_argument('--test-batch-size', default=32, type=int)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
    parser.add_argument('--config', help='Load config file.')
parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
parser.add_argument('--local_rank', type=int, default=0)
# parser.add_argument('--nprocs', type=int, default=1)
parser.add_argument('--type', default='M',
help='data types, e.g., "M" or "K"')
parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
parser.add_argument('--resume', default='', help='resume from checkpoint')
# * Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment dir')
parser.add_argument('--seed', type=int, default=123, help='random seed')
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--shuffle', default=False, action='store_true', help='Tokens shuffle')
# Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine"; also "step", "multistep")')
    parser.add_argument('--lr', type=float, default=1e-2, metavar='LR',
                        help='learning rate (default: 1e-2)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
    parser.add_argument('--decay-milestones', type=int, nargs='+', default=[10, 20, 30], metavar='MILESTONES',
                        help='epochs at which the multistep scheduler decays the LR (default: [10, 20, 30])')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Optimizer parameters
    parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=5., metavar='NORM',
                        help='Clip gradient norm (default: 5.0)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0005,
help='weight decay (default: 0.0005)')
    parser.add_argument('--ACCUMULATION-STEPS', type=int, default=0,
                        help='gradient accumulation steps; accumulation is disabled when <= 1 (default: 0)')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=0.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 0.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--mixup-dynamic', action='store_true', default=False,
                        help='dynamically adjust the mixup strength over training')
# Augmentation parameters
parser.add_argument('--autoaug', action='store_true')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation: "random", "bilinear", or "bicubic" (default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
parser.add_argument('--translate', type=int, default=20,
help='translate angle (default: 0)')
parser.add_argument('--strong-aug', action='store_true',
help='Strong Augmentation (default: False)')
parser.add_argument('--resize-rate', type=float, default=0.1,
help='random resize rate (default: 0.1)')
# * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.0, metavar='PCT',
                        help='Random erase prob (default: 0.0)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * ShuffleMix params
    parser.add_argument('--shufflemix', type=float, default=0.2,
                        help='shufflemix alpha, shufflemix enabled if > 0. (default: 0.2)')
parser.add_argument('--smixmode', type=str, default='sm',
help='ShuffleMix strategies (default: "shufflemix(sm)", Per "sm_v1", "sm_v2", or "sm_v3", "mu_sm")')
parser.add_argument('--smprob', type=float, default=0.3, metavar='ShuffleMix Prob',
help='ShuffleMix enable prob (default: 0.3)')
parser.add_argument('--temporal-consist', action='store_true')
parser.add_argument('--tempMix', action='store_true')
parser.add_argument('--MixIntra', action='store_true')
parser.add_argument('--replace-prob', type=float, default=0.25, metavar='MixIntra replace Prob')
# DTN example sampling params
parser.add_argument('--sample-duration', type=int, default=16,
help='The sampled frames in a video.')
    parser.add_argument('--intar-fatcer', type=int, default=2,
                        help='Interval factor used when sampling frames from a video (default: 2).')
parser.add_argument('--sample-window', type=int, default=1,
help='Range of frames sampling (default: 1)')
# * Recoupling params
    parser.add_argument('--distill', type=float, default=0.3, metavar='distill param',
                        help='distillation loss coefficient (default: 0.3)')
parser.add_argument('--temper', type=float, default=0.6, metavar='distillation temperature')
# * Cross modality loss params
parser.add_argument('--DC-weight', type=float, default=0.2, metavar='cross depth loss weight')
# * fp16 params
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
# * Rank Pooling params
parser.add_argument('--frp-num', type=int, default=0, metavar='The Number of Epochs.')
parser.add_argument('--w', type=int, default=4, metavar='The slide window of FRP.')
parser.add_argument('--FusionNet', default='cs32', choices=['cs16', 'cs32', 'cs64', 'cv16', 'cv32', 'cv64'],
help='used for multi-modal fusion.')
parser.add_argument('--scc-depth', type=int, default=2, metavar='SCC depth')
parser.add_argument('--tcc-depth', type=int, default=4, metavar='TCC depth')
return parser
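# Average a scalar tensor across all distributed workers (used when logging metrics).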
def reduce_mean(tensor, nprocs):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= nprocs
return rt.item()
def main(args):
utils.init_distributed_mode(args)
print(args)
if args.Network != 'FusionNet':
logging.info('Reset the model to the fusion training state.')
args.Network = 'FusionNet'
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
if args.amp_opt_level == 'O0':
logging.info('no apex is used')
seed = args.seed + utils.get_rank()
np.random.seed(seed)
cudnn.benchmark = True
torch.manual_seed(seed)
cudnn.enabled = True
torch.cuda.manual_seed(seed)
local_rank = utils.get_rank()
args.nprocs = utils.get_world_size()
print('nprocs:', args.nprocs)
device = torch.device(args.device)
#----------------------------
# build function
#----------------------------
model = build_model(args)
model = model.to(device)
train_queue, train_sampler = build_dataset(args, phase='train')
valid_queue, valid_sampler = build_dataset(args, phase='valid')
if args.SYNC_BN and args.nprocs > 1:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
model_without_ddp = model.module
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes, args=args)
else:
args.loss['name'] = 'CE'
optimizer = create_optimizer(args, model_without_ddp)
if args.fp16:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
criterion = build_loss(args)
loss_scaler = NativeScaler()
scheduler, _ = create_scheduler(args, optimizer)
if args.finetune:
load_pretrained_checkpoint(model_without_ddp, args.finetune)
if args.resume:
strat_epoch, best_acc = load_checkpoint(model_without_ddp, args.resume, optimizer, scheduler)
print("Start Epoch: {}, Learning rate: {}, Best accuracy: {}".format(strat_epoch, [g['lr'] for g in
optimizer.param_groups],
round(best_acc, 4)))
scheduler.step(strat_epoch - 1)
if args.resumelr:
for g in optimizer.param_groups:
args.resumelr = g['lr'] if not isinstance(args.resumelr, float) else args.resumelr
g['lr'] = args.resumelr
#resume_scheduler = np.linspace(args.resumelr, 1e-5, args.epochs - strat_epoch)
resume_scheduler = cosine_scheduler(args.resumelr, 1e-5, args.epochs - strat_epoch + 1, niter_per_ep=1).tolist()
resume_scheduler.pop(0)
args.epoch = strat_epoch - 1
else:
strat_epoch = 0
best_acc = 0.0
args.epoch = strat_epoch
if local_rank == 0:
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
logging.info("learnable param size = %fMB", utils.count_learnable_parameters_in_MB(model))
if hasattr(model_without_ddp, 'flops'):
flops = model_without_ddp.flops()
logging.info(f"number of GFLOPs: {flops / 1e9}")
captuer=None
if args.FusionNet:
captuer = FeatureCapter(args, num_classes=args.num_classes)
captuer = captuer.to(device)
captuer.eval()
train_results = dict(
train_score=[],
train_loss=[],
valid_score=[],
valid_loss=[],
best_score=0.0
)
first_test = True
if first_test:
args.distill_lamdb = args.distill
valid_acc, _, valid_dict, meter_dict, output = infer(valid_queue, model, criterion, local_rank, strat_epoch, device, captuer)
from sklearn.metrics import confusion_matrix, auc, roc_curve, roc_auc_score
num_cat = []
categories = np.unique(valid_dict['grounds'])
cm = confusion_matrix(valid_dict['grounds'], valid_dict['preds'], labels=categories)
fig = plt.figure()
ax = fig.add_subplot()
sns.heatmap(cm, annot=True, fmt='g', ax=ax)
# labels, title and ticks
ax.set_title('Confusion Matrix', fontsize=20)
ax.set_xlabel('Predicted labels', fontsize=16)
ax.set_ylabel('True labels', fontsize=16)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
fig.savefig(os.path.join(args.save, "confusion_matrix"), dpi=fig.dpi)
Accuracy = [(cm[i, i] / sum(cm[i, :])) * 100 if sum(cm[i, :]) != 0 else 0.000001 for i in range(cm.shape[0])]
Precision = [(cm[i, i] / sum(cm[:, i])) * 100 if sum(cm[:, i]) != 0 else 0.000001 for i in range(cm.shape[1])]
print('| Class ID \t Accuracy(%) \t Precision(%) |')
for i in range(len(Accuracy)):
print('| {0} \t {1} \t {2} |'.format(i, round(Accuracy[i], 2), round(Precision[i], 2)))
print('-' * 80)
if args.eval_only:
return
for epoch in range(strat_epoch, args.epochs):
train_sampler.set_epoch(epoch)
model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
args.epoch = epoch
train_acc, train_obj, meter_dict_train = train_one_epoch(train_queue, model, criterion, optimizer, epoch, local_rank, loss_scaler, device, mixup_fn, captuer)
valid_acc, valid_obj, valid_dict, meter_dict_val, output = infer(valid_queue, model, criterion, local_rank, epoch, device, captuer)
scheduler.step(epoch)
if local_rank == 0:
if valid_acc > best_acc:
best_acc = valid_acc
isbest = True
else:
isbest = False
# logging.info(f'train_acc {round(train_acc, 4)}, top-5 {round(meter_dict_train["Acc_top5"].avg, 4)}, train_loss {round(train_obj, 4)}')
logging.info(f'valid_acc {round(valid_acc, 4)}, best_acc {round(best_acc, 4)}')
state = {'model': model.module.state_dict(),'optimizer': optimizer.state_dict(),
'epoch': epoch + 1, 'bestacc': best_acc,
'scheduler': scheduler.state_dict(),
'scaler': loss_scaler.state_dict(),
'args': args,
}
save_checkpoint(state, isbest, args.save)
train_results['train_score'].append(train_acc)
train_results['train_loss'].append(train_obj)
train_results['valid_score'].append(valid_acc)
train_results['valid_loss'].append(valid_obj)
train_results['best_score'] = best_acc
train_results.update(valid_dict)
train_results['categories'] = np.unique(valid_dict['grounds'])
if args.visdom['enable']:
vis.plot_many({'train_acc': train_acc, 'loss': train_obj,
'cosin_similar': meter_dict_train['cosin_similar'].avg}, 'Train-' + args.type, epoch)
vis.plot_many({'valid_acc': valid_acc, 'loss': valid_obj,
'cosin_similar': meter_dict_val['cosin_similar'].avg}, 'Valid-' + args.type, epoch)
if isbest:
EvaluateMetric(PREDICTIONS_PATH=args.save, train_results=train_results, idx=epoch)
for k, v in train_results.items():
if isinstance(v, list):
v.clear()
def train_one_epoch(train_queue, model, criterion, optimizer, epoch, local_rank, loss_scaler, device,
mixup_fn=None, captuer=None
):
model.train()
meter_dict = dict(
Total_loss=AverageMeter(),
)
meter_dict['Data_Time'] = AverageMeter()
meter_dict.update(dict(
Acc_s=AverageMeter(),
Acc_m=AverageMeter(),
Acc_l=AverageMeter(),
Acc_rgbd=AverageMeter(),
Acc_rgbd_top5=AverageMeter(),
Acc_all = AverageMeter(),
Acc_all_top5=AverageMeter(),
))
meter_dict['Fusion_loss'] = AverageMeter()
meter_dict['Acc_fusion'] = AverageMeter()
meter_dict['Acc_fusion_top5'] = AverageMeter()
end = time.time()
CE = torch.nn.CrossEntropyLoss()
MSE = torch.nn.MSELoss()
rcm_loss = RCM_loss(args, model.module)
for step, (inputs, heatmap, target, _) in enumerate(train_queue):
inputs, inputs_aux = inputs
heatmap, heatmap_aux = heatmap
inputs_aux, heatmap_aux = map(lambda x: x.to(device, non_blocking=True), [inputs_aux, heatmap_aux])
meter_dict['Data_Time'].update((time.time() - end)/args.batch_size)
inputs, target, heatmap = map(lambda x: x.to(device, non_blocking=True), [inputs, target, heatmap])
ori_target, target_aux = target, target
if mixup_fn is not None:
inputs, target = mixup_fn(inputs, target)
inputs_aux, target_aux = mixup_fn(inputs_aux, ori_target)
images = inputs
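        # Run the frozen capture network without gradients; only the fusion model
        # is optimized in this stage.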
with torch.no_grad():
(logits, xs, xm, xl), (logit_K, K_xs, K_xm, K_xl), hidden_feature = captuer(inputs, inputs_aux)
output, _ = model(hidden_feature)
fusion_loss = criterion(output[0], target) + criterion(output[1], target_aux)
globals()['Fusion_loss'] = fusion_loss
Total_loss = fusion_loss
globals()['Acc_fusion'], globals()['Acc_fusion_top5'] = accuracy((output[0] + output[1])/2.0, ori_target, topk=(1, 5))
        if args.ACCUMULATION_STEPS > 1:
            # Scale the loss before backward so gradients accumulate to the true mean.
            Total_loss = Total_loss / args.ACCUMULATION_STEPS
            globals()['Total_loss'] = Total_loss
            Total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
if (step + 1) % args.ACCUMULATION_STEPS == 0:
optimizer.step()
optimizer.zero_grad()
else:
globals()['Total_loss'] = Total_loss
optimizer.zero_grad()
if args.fp16:
with amp.scale_loss(Total_loss, optimizer) as scaled_loss:
scaled_loss.backward()
grad_norm = nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.clip_grad)
else:
Total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
#---------------------
# Meter performance
#---------------------
torch.distributed.barrier()
globals()['Acc_rgbd'], globals()['Acc_rgbd_top5'] = accuracy(logits+logit_K, ori_target, topk=(1, 5))
globals()['Acc_all'], globals()['Acc_all_top5'] = accuracy((output[0] + output[1] + logits + logit_K)/4.0, ori_target, topk=(1, 5))
globals()['Acc_s'], _ = accuracy(xs+K_xs, ori_target, topk=(1, 5))
globals()['Acc_m'], _ = accuracy(xm+K_xm, ori_target, topk=(1, 5))
globals()['Acc_l'], _ = accuracy(xl+K_xl, ori_target, topk=(1, 5))
for name in meter_dict:
if 'loss' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if 'Acc' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if step % args.report_freq == 0 and local_rank == 0:
log_info = {
'Epoch': '{}/{}'.format(epoch + 1, args.epochs),
'Mini-Batch': '{:0>5d}/{:0>5d}'.format(step + 1,
len(train_queue.dataset) // (args.batch_size * args.nprocs)),
'Lr': optimizer.param_groups[0]["lr"],
}
log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
print_func(log_info)
if args.vis_feature:
Visfeature(args, model.module, images, weight_softmax=torch.softmax(logits, dim=-1), FusionNet=True)
end = time.time()
if local_rank == 0:
print('*'*20)
print_func(dict([(name, meter_dict[name].avg) for name in meter_dict]))
print('*'*20)
return meter_dict['Acc_all'].avg, meter_dict['Total_loss'].avg, meter_dict
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
@torch.no_grad()
def infer(valid_queue, model, criterion, local_rank, epoch, device, captuer, obtain_softmax_score=True):
model.eval()
meter_dict = dict(
Total_loss=AverageMeter(),
)
meter_dict.update(dict(
Acc_r=AverageMeter(),
Acc_r_top5=AverageMeter(),
Acc_d=AverageMeter(),
Acc_d_top5=AverageMeter(),
Acc_rgbd = AverageMeter(),
Acc_rgbd_top5=AverageMeter(),
Acc_fusion=AverageMeter(),
Acc_fusion_top5=AverageMeter(),
))
meter_dict['Infer_Time'] = AverageMeter()
CE = torch.nn.CrossEntropyLoss()
MSE = torch.nn.MSELoss()
grounds, preds, v_paths = [], {0:[], 1:[]}, []
logits_out = {}
softmax_score = {}
embedding_dict = OrderedDict()
for step, (inputs, heatmap, target, v_path) in enumerate(valid_queue):
inputs, inputs_aux = inputs
heatmap, heatmap_aux = heatmap
inputs_aux, heatmap_aux = map(lambda x: x.to(device, non_blocking=True), [inputs_aux, heatmap_aux])
n = inputs.size(0)
end = time.time()
inputs, target, heatmap = map(lambda x: x.to(device, non_blocking=True), [inputs, target, heatmap])
images = inputs
(logits, xs, xm, xl), (logit_K, K_xs, K_xm, K_xl), hidden_feature = captuer(inputs, inputs_aux)
output, temp_out = model(hidden_feature)
Fusion_loss = CE(output[0], target) + CE(output[1], target)
Total_loss = Fusion_loss
globals()['Total_loss'] = Total_loss
meter_dict['Infer_Time'].update((time.time() - end) / n)
v_paths += v_path
grounds += target.cpu().tolist()
        # collect predictions from the RGB-D average and the fused-head average
preds[0] += torch.argmax((logits+logit_K)/2., dim=1).cpu().tolist()
preds[1] += torch.argmax((logits + logit_K + output[0] + output[1])/4., dim=1).cpu().tolist()
torch.distributed.barrier()
globals()['Acc_r'], globals()['Acc_r_top5'] = accuracy(logits, target, topk=(1, 5))
globals()['Acc_d'], globals()['Acc_d_top5'] = accuracy(logit_K, target, topk=(1, 5))
globals()['Acc_rgbd'], globals()['Acc_rgbd_top5'] = accuracy((logits+logit_K)/2., target, topk=(1, 5))
globals()['Acc_fusion'], globals()['Acc_fusion_top5'] = accuracy((logits + logit_K + output[0] + output[1])/4., target, topk=(1, 5))
for name in meter_dict:
if 'loss' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if 'Acc' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if step % args.report_freq == 0 and local_rank == 0:
log_info = {
'Epoch': epoch + 1,
'Mini-Batch': '{:0>4d}/{:0>4d}'.format(step + 1, len(valid_queue.dataset) // (
args.test_batch_size * args.nprocs)),
}
log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
print_func(log_info)
if args.vis_feature:
Visfeature(args, model.module, images, v_path, torch.softmax(logits, dim=-1), FusionNet=True)
# select best acc output
acc_list = torch.tensor([meter_dict['Acc_fusion'].avg, meter_dict['Acc_rgbd'].avg])
best_idx = torch.argmax(acc_list).tolist()
    preds = preds[best_idx]  # Note: only preds are refined; grounds are kept as-is
grounds_gather = concat_all_gather(torch.tensor(grounds).to(device))
preds_gather = concat_all_gather(torch.tensor(preds).to(device))
grounds_gather, preds_gather = list(map(lambda x: x.cpu().numpy(), [grounds_gather, preds_gather]))
if local_rank == 0:
print('*'*20)
print_func(dict([(name, meter_dict[name].avg) for name in meter_dict]))
print('*'*20)
v_paths = np.array(v_paths)
grounds = np.array(grounds)
preds = np.array(preds)
wrong_idx = np.where(grounds != preds)
v_paths = v_paths[wrong_idx[0]]
grounds = grounds[wrong_idx[0]]
preds = preds[wrong_idx[0]]
return acc_list.tolist()[best_idx], meter_dict['Total_loss'].avg, dict(grounds=grounds_gather, preds=preds_gather, valid_images=(v_paths, grounds, preds)), meter_dict, logits_out
if __name__ == '__main__':
# import os
# args.local_rank=os.environ['LOCAL_RANK']
parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
args = Config(args)
if args.save and args.local_rank == 0:
Path(args.save).mkdir(parents=True, exist_ok=True)
try:
if args.resume:
args.save = os.path.split(args.resume)[0]
else:
args.save = f'{args.save}'
utils.create_exp_dir(args.save, scripts_to_save=[args.config] + glob.glob('./train.py') + glob.glob('lib/model/*.py'))
except:
pass
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log{}.txt'.format(time.strftime("%Y%m%d-%H%M%S"))))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
main(args) | 30,632 | 45.064662 | 183 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/tools/fusion.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import os, random, math
import time
import glob
import numpy as np
import shutil
import torch
import logging
import argparse
import traceback
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import sys
sys.path.append(os.path.abspath(os.path.join("..", os.getcwd())))
from config import Config
from lib import *
import torch.distributed as dist
from utils import *
from utils.build import *
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='Path to the config file.')
parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--nprocs', type=int, default=1)
parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
parser.add_argument('--save_output', action='store_true', help='Save logits?')
parser.add_argument('--fp16', action='store_true', help='Training with fp16')
parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment name')
parser.add_argument('--seed', type=int, default=123, help='random seed')
args = parser.parse_args()
args = Config(args)
#====================================================
# Some configuration
#====================================================
try:
if args.resume:
args.save = os.path.split(args.resume)[0]
else:
args.save = '{}/{}-EXP-{}'.format(args.save, args.Network, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=[args.config] + glob.glob('./tools/train*.py')+glob.glob('./lib/model/*.py'))
except:
pass
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log{}.txt'.format(time.strftime("%Y%m%d-%H%M%S"))))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
#---------------------------------
# Fusion Net Training
#---------------------------------
def reduce_mean(tensor, nprocs):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= nprocs
return rt.item()
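# Illustrative sketch (not part of the original script, never called): how
# `reduce_mean` is meant to be used. It averages a scalar tensor across all
# ranks so every process logs the same metric. Assumes torch.distributed is
# already initialized and a CUDA device is available.
def _demo_reduce_mean():
    acc = torch.tensor(0.75).cuda()         # per-rank scalar metric
    world_size = dist.get_world_size()
    return reduce_mean(acc, world_size)     # identical float on every rank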
def main(local_rank, nprocs, args):
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % local_rank)
# ---------------------------
# Init distribution
# ---------------------------
torch.cuda.set_device(local_rank)
torch.distributed.init_process_group(backend='nccl')
# ----------------------------
# build function
# ----------------------------
model = build_model(args)
model = model.cuda(local_rank)
criterion = build_loss(args)
optimizer = build_optim(args, model)
scheduler = build_scheduler(args, optimizer)
train_queue, train_sampler = build_dataset(args, phase='train')
valid_queue, valid_sampler = build_dataset(args, phase='valid')
if args.resume:
model, optimizer, strat_epoch, best_acc = load_checkpoint(model, args.resume, optimizer)
logging.info("The network will resume training.")
logging.info("Start Epoch: {}, Learning rate: {}, Best accuracy: {}".format(strat_epoch, [g['lr'] for g in
optimizer.param_groups],
round(best_acc, 4)))
if args.resumelr:
for g in optimizer.param_groups: g['lr'] = args.resumelr
args.resume_scheduler = cosine_scheduler(args.resumelr, 1e-5, args.epochs - strat_epoch, len(train_queue))
else:
strat_epoch = 0
best_acc = 0.0
scheduler[0].last_epoch = strat_epoch
if args.SYNC_BN and args.nprocs > 1:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], find_unused_parameters=True)
if local_rank == 0:
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
train_results = dict(
train_score=[],
train_loss=[],
valid_score=[],
valid_loss=[],
best_score=0.0
)
if args.eval_only:
valid_acc, _, _, meter_dict = infer(valid_queue, model, criterion, local_rank, 0)
valid_acc = max(meter_dict['Acc_all'].avg, meter_dict['Acc'].avg, meter_dict['Acc_3'].avg)
logging.info('valid_acc: {}, Acc_1: {}, Acc_2: {}, Acc_3: {}'.format(valid_acc, meter_dict['Acc_1'].avg, meter_dict['Acc_2'].avg, meter_dict['Acc_3'].avg))
return
#---------------------------
# Mixed Precision Training
# --------------------------
if args.fp16:
scaler = torch.cuda.amp.GradScaler()
else:
scaler = None
for epoch in range(strat_epoch, args.epochs):
train_sampler.set_epoch(epoch)
model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
if epoch < args.scheduler['warm_up_epochs']:
for g in optimizer.param_groups:
g['lr'] = scheduler[-1](epoch)
args.epoch = epoch
train_acc, train_obj, meter_dict_train = train(train_queue, model, criterion, optimizer, epoch, local_rank, scaler)
valid_acc, valid_obj, valid_dict, meter_dict_val = infer(valid_queue, model, criterion, local_rank, epoch)
valid_acc = max(meter_dict_val['Acc_all'].avg, meter_dict_val['Acc'].avg, meter_dict_val['Acc_3'].avg)
if epoch >= args.scheduler['warm_up_epochs']:
if args.scheduler['name'] == 'ReduceLR':
scheduler[0].step(valid_acc)
else:
scheduler[0].step()
if local_rank == 0:
if valid_acc > best_acc:
best_acc = valid_acc
isbest = True
else:
isbest = False
logging.info('train_acc %f', train_acc)
logging.info('valid_acc: {}, Acc_1: {}, Acc_2: {}, Acc_3: {}, best acc: {}'.format(meter_dict_val['Acc'].avg, meter_dict_val['Acc_1'].avg,
meter_dict_val['Acc_2'].avg,
meter_dict_val['Acc_3'].avg, best_acc))
state = {'model': model.module.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch + 1, 'bestacc': best_acc}
save_checkpoint(state, isbest, args.save)
train_results['train_score'].append(train_acc)
train_results['train_loss'].append(train_obj)
train_results['valid_score'].append(valid_acc)
train_results['valid_loss'].append(valid_obj)
train_results['best_score'] = best_acc
train_results.update(valid_dict)
train_results['categories'] = np.unique(valid_dict['grounds'])
if isbest:
EvaluateMetric(PREDICTIONS_PATH=args.save, train_results=train_results, idx=epoch)
for k, v in train_results.items():
if isinstance(v, list):
v.clear()
def train(train_queue, model, criterion, optimizer, epoch, local_rank, scaler):
model.train()
meter_dict = dict(
Total_loss=AverageMeter(),
MSE_loss=AverageMeter(),
CE_loss=AverageMeter(),
BCE_loss=AverageMeter(),
Distill_loss = AverageMeter()
)
meter_dict['Data_Time'] = AverageMeter()
meter_dict.update(dict(
Acc_1=AverageMeter(),
Acc_2=AverageMeter(),
Acc_3=AverageMeter(),
Acc=AverageMeter()
))
end = time.time()
for step, (inputs, heatmap, target, _) in enumerate(train_queue):
meter_dict['Data_Time'].update((time.time() - end)/args.batch_size)
inputs, target, heatmap = map(lambda x: [d.cuda(local_rank, non_blocking=True) for d in x] if isinstance(x, list) else x.cuda(local_rank, non_blocking=True), [inputs, target, heatmap])
if args.resumelr:
for g in optimizer.param_groups:
g['lr'] = args.resume_scheduler[len(train_queue) * args.resume_epoch + step]
# ---------------------------
# Mixed Precision Training
# --------------------------
if args.fp16:
print('Train with FP16')
optimizer.zero_grad()
# Runs the forward pass with autocasting.
with torch.cuda.amp.autocast():
(logits, logit_r, logit_d), (CE_loss, BCE_loss, MSE_loss, distillation) = model(inputs, heatmap, target)
globals()['CE_loss'] = CE_loss
globals()['MSE_loss'] = MSE_loss
globals()['BCE_loss'] = BCE_loss
globals()['Distill_loss'] = distillation
globals()['Total_loss'] = CE_loss + MSE_loss + BCE_loss + distillation
scaler.scale(Total_loss).backward()
# Unscales the gradients of optimizer's assigned params in-place
scaler.unscale_(optimizer)
nn.utils.clip_grad_norm_(model.module.parameters(), args.grad_clip)
scaler.step(optimizer)
scaler.update()
else:
# ---------------------------
# Fp32 Precision Training
# --------------------------
(logits, logit_r, logit_d), (CE_loss, BCE_loss, MSE_loss, distillation) = model(inputs, heatmap, target)
globals()['CE_loss'] = CE_loss
globals()['MSE_loss'] = MSE_loss
globals()['BCE_loss'] = BCE_loss
globals()['Distill_loss'] = distillation
globals()['Total_loss'] = CE_loss + MSE_loss + BCE_loss + distillation
optimizer.zero_grad()
Total_loss.backward()
nn.utils.clip_grad_norm_(model.module.parameters(), args.grad_clip)
optimizer.step()
#---------------------
# Meter performance
#---------------------
torch.distributed.barrier()
globals()['Acc'] = calculate_accuracy(logits, target)
globals()['Acc_1'] = calculate_accuracy(logit_r, target)
globals()['Acc_2'] = calculate_accuracy(logit_d, target)
globals()['Acc_3'] = calculate_accuracy(logit_r+logit_d, target)
for name in meter_dict:
if 'loss' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if 'Acc' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if step % args.report_freq == 0 and local_rank == 0:
log_info = {
'Epoch': '{}/{}'.format(epoch + 1, args.epochs),
'Mini-Batch': '{:0>5d}/{:0>5d}'.format(step + 1,
len(train_queue.dataset) // (args.batch_size * args.nprocs)),
'Lr': ['{:.4f}'.format(g['lr']) for g in optimizer.param_groups],
}
log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
print_func(log_info)
end = time.time()
args.resume_epoch += 1
return meter_dict['Acc'].avg, meter_dict['Total_loss'].avg, meter_dict
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
@torch.no_grad()
def infer(valid_queue, model, criterion, local_rank, epoch):
model.eval()
meter_dict = dict(
Total_loss=AverageMeter(),
MSE_loss=AverageMeter(),
CE_loss=AverageMeter(),
Distill_loss=AverageMeter()
)
meter_dict.update(dict(
Acc_1=AverageMeter(),
Acc_2=AverageMeter(),
Acc_3=AverageMeter(),
Acc = AverageMeter(),
Acc_all=AverageMeter(),
))
meter_dict['Infer_Time'] = AverageMeter()
grounds, preds, v_paths = [], [], []
for step, (inputs, heatmap, target, v_path) in enumerate(valid_queue):
end = time.time()
inputs, target, heatmap = map(
lambda x: [d.cuda(local_rank, non_blocking=True) for d in x] if isinstance(x, list) else x.cuda(local_rank,
non_blocking=True),
[inputs, target, heatmap])
if args.fp16:
with torch.cuda.amp.autocast():
(logits, logit_r, logit_d), (CE_loss, BCE_loss, MSE_loss, distillation) = model(inputs, heatmap, target)
else:
(logits, logit_r, logit_d), (CE_loss, BCE_loss, MSE_loss, distillation) = model(inputs, heatmap, target)
meter_dict['Infer_Time'].update((time.time() - end) / args.test_batch_size)
globals()['CE_loss'] = CE_loss
globals()['MSE_loss'] = MSE_loss
globals()['BCE_loss'] = BCE_loss
globals()['Distill_loss'] = distillation
globals()['Total_loss'] = CE_loss + MSE_loss + BCE_loss + distillation
torch.distributed.barrier()
globals()['Acc'] = calculate_accuracy(logits, target)
globals()['Acc_1'] = calculate_accuracy(logit_r, target)
globals()['Acc_2'] = calculate_accuracy(logit_d, target)
globals()['Acc_3'] = calculate_accuracy(logit_r+logit_d, target)
globals()['Acc_all'] = calculate_accuracy(logit_r+logit_d+logits, target)
grounds += target.cpu().tolist()
preds += torch.argmax(logits, dim=1).cpu().tolist()
v_paths += v_path
for name in meter_dict:
if 'loss' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if 'Acc' in name:
meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
if step % args.report_freq == 0 and local_rank == 0:
log_info = {
'Epoch': epoch + 1,
'Mini-Batch': '{:0>4d}/{:0>4d}'.format(step + 1, len(valid_queue.dataset) // (
args.test_batch_size * args.nprocs)),
}
log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
print_func(log_info)
torch.distributed.barrier()
grounds_gather = concat_all_gather(torch.tensor(grounds).cuda(local_rank))
preds_gather = concat_all_gather(torch.tensor(preds).cuda(local_rank))
grounds_gather, preds_gather = list(map(lambda x: x.cpu().numpy(), [grounds_gather, preds_gather]))
if local_rank == 0:
v_paths = np.array(v_paths)
grounds = np.array(grounds)
preds = np.array(preds)
wrong_idx = np.where(grounds != preds)
v_paths = v_paths[wrong_idx[0]]
grounds = grounds[wrong_idx[0]]
preds = preds[wrong_idx[0]]
return meter_dict['Acc'].avg, meter_dict['Total_loss'].avg, dict(grounds=grounds_gather, preds=preds_gather, valid_images=(v_paths, grounds, preds)), meter_dict
if __name__ == '__main__':
try:
main(args.local_rank, args.nprocs, args)
except KeyboardInterrupt:
torch.cuda.empty_cache()
if os.path.exists(args.save) and len(os.listdir(args.save)) < 3:
print('remove ‘{}’: Directory'.format(args.save))
os.system('rm -rf {} \n mv {} ./Checkpoints/trash'.format(args.save, args.save))
os._exit(0)
except Exception:
print(traceback.print_exc())
if os.path.exists(args.save) and len(os.listdir(args.save)) < 3:
print('remove ‘{}’: Directory'.format(args.save))
os.system('rm -rf {} \n mv {} ./Checkpoints/trash'.format(args.save, args.save))
os._exit(0)
finally:
torch.cuda.empty_cache()
| 16,564 | 40.830808 | 192 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/demo/cluster.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
import torch.nn.functional as F
from scipy.spatial.distance import pdist
import pandas as pd
from sklearn import manifold
import numpy as np
import sys
import sklearn
from sklearn import metrics
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
M_path = '/mnt/workspace/Code/MotionRGBD-PAMI/Checkpoints/THU-READ4-32-DTNV2-M-simi/'
K_path = '/mnt/workspace/Code/MotionRGBD-PAMI/Checkpoints/THU-READ4-32-DTNV2-K-simi-cross/'
def normalization(data):
_range = torch.max(data) - torch.min(data)
return (data - torch.min(data)) / _range
def standardization(data):
mu = np.mean(data, axis=0)
sigma = np.std(data, axis=0)
return (data - mu) / sigma
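# Quick sketch (illustrative only, not used by this script) of the two
# rescaling helpers above: `normalization` maps values into [0, 1], while
# `standardization` gives zero mean and unit variance per column.
def _demo_rescaling():
    t = torch.tensor([1.0, 3.0, 5.0])
    scaled = normalization(t)                  # tensor([0.0, 0.5, 1.0])
    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    z = standardization(a)                     # [[-1., -1.], [1., 1.]]
    return scaled, z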
data = []
for i in range(0, 100, 5):
M_checkpoint = M_path + f'/feature-M-epoch{i}.pth'
M_features = torch.load(M_checkpoint, map_location='cpu')
K_checkpoint = K_path + f'/feature-K-epoch{i}.pth'
K_features = torch.load(K_checkpoint, map_location='cpu')
simlitary = []
for (km, vm), (kd, vd) in zip(M_features.items(), K_features.items()):
assert km == kd
# pca_data = pd.DataFrame(vm.cpu().numpy())
# vm = torch.tensor(tsne.fit_transform(pca_data))
# pca_data = pd.DataFrame(vd.cpu().numpy())
# vd = torch.tensor(tsne.fit_transform(pca_data))
# vm, vd = normalization(vm), normalization(vd) #F.normalize(vm, p = 2, dim=-1), F.normalize(vd, p = 2, dim=-1) #
simil = F.pairwise_distance(vm.unsqueeze(0), vd.unsqueeze(0), p=2)
# simil = torch.cosine_similarity(vm, vd, dim=-1)
# simil = torch.tensor(pdist(np.vstack([vm.numpy(),vd.numpy()]),'seuclidean')[0])
# simil = vm * vd
simlitary.append(simil.unsqueeze(0))
simi_value = torch.cat(simlitary).mean()
data.append(float(simi_value))
M_embed = torch.cat([F.normalize(e.unsqueeze(0), p = 2, dim=-1) for e in M_features.values()])
K_embed = torch.cat([F.normalize(e.unsqueeze(0), p = 2, dim=-1) for e in K_features.values()])
embed = torch.cat((M_embed, K_embed))
label_embed = torch.cat((torch.ones(M_embed.shape[0]), torch.ones(M_embed.shape[0])+1))
tsne = manifold.TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
embed = pd.DataFrame(embed.cpu().numpy())
pca_embed = tsne.fit_transform(embed)
embed.insert(0, 'label', pd.DataFrame(label_embed.cpu().numpy()))
print(pca_embed.shape)
fig, ax = plt.subplots()
scatter = ax.scatter(pca_embed[:, 0], pca_embed[:, 1], c=embed['label'], s=25, cmap='rainbow',
alpha=0.8, edgecolors='none')
plt.savefig("./"+'cluster.png', dpi=120, bbox_inches='tight') | 2,693 | 37.485714 | 121 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/demo/plot.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from mpl_toolkits import axisartist
import seaborn as sns
import numpy as np
import re
import sys
import os, argparse, random
import torch
def plot_curve(datas, flag, show_value=False):
fig = plt.figure(figsize=(12, 7))
ax = fig.add_subplot()
for name, data in datas:
plt.plot(data, '-', label=name)
if show_value:
for a, b in zip(range(len(data)), data):
plt.text(a, b + 0.05, '%.2f' % b, ha='center', va='bottom', fontsize=9)
ax.set_ylabel('value')
ax.set_xlabel('epoch')
# plt.xticks(range(len(data)), rotation=0)
plt.grid()
plt.legend()
plt.savefig('./{}.png'.format(flag), dpi=fig.dpi)
#--------------------------------------
# Plot cvpr2022 multi-scale result: bar
#--------------------------------------
def multiscale():
name = ['Spatial-temporal I3D', 'Spatial Inception CNN\n + \n Single-scale Trans', 'Spatial Inception CNN \n + \n Dual-scale Trans', 'Spatial Inception CNN \n + \n Multi-scale Trans']
y = [68.54, 69.67, 72.20, 73.16]
y1 = [65.50, 68.33, 69.58, 70.50]
fig = plt.figure(figsize=(12, 7), dpi=100)
ax = fig.add_subplot()
bar_high = 0.4
x = np.arange(len(name))
b1 = ax.bar(x, y, width=bar_high, label='NvGesture', color=sns.xkcd_rgb["pale red"])
b2 = ax.bar(x+bar_high, y1, width=bar_high, label='THU-READ', color=sns.xkcd_rgb["denim blue"])
# labels, title and ticks
ax.set_ylabel('Accuracy(%)', fontsize=16)
plt.xticks(x + bar_high / 2, name, rotation=0,
# fontweight='bold',
fontsize=16)
# plt.xlim(0, 100)
plt.ylim(60, 75)
for a, b, c in zip(x, y, y1):
plt.text(a, b + 0.05, '%.2f' % b, ha='center', va='bottom', fontsize=16)
plt.text(a+bar_high, c + 0.05, '%.2f' % c, ha='center', va='bottom', fontsize=16)
# for rect, rect1 in zip(b1, b2):
# wd = rect.get_width()
# plt.text(wd, rect.get_x() + 0.5 / 2, str(wd), va='center')
#
# wd = rect1.get_width()
# plt.text(wd, rect1.get_x() + 0.5 / 2, str(wd), va='center')
plt.legend(handles=[b1, b2])
plt.show()
def FRPWindowsAndKnn():
name1 = [2, 5, 10, 15]
name2 = ["20%", '40%', '50%', '60%', '70%']
# Nv1 = [0.00, 76.04, 76.25, 75.00]
Nv1 = [76.67, 77.08, 78.57, 73.33]
# Nv2 = [0.00, 74.17, 74.38, 75.42, 72.71]
Nv2 = [77.71, 75.42, 78.13, 76.25, 76.67]
# thu1 = [79.58, 78.75, 75.00, 78.75, 0.00]
thu1 = [61.25, 59.17, 62.50, 58.75]
thu2 = [59.17, 60.41, 61.25, 60.42, 64.58]
fig = plt.figure()
ax1 = fig.add_subplot(121)
plt.plot(range(len(name1)), Nv1, 'bo--', label='NvGesture')
for a, b in zip(range(len(name1)), Nv1):
plt.text(a, b + 0.05, '%.2f' % b, ha='center', va='bottom', fontsize=9)
plt.plot(range(len(name1)), thu1, 'ro--', label='THU-READ')
for a, b in zip(range(len(name1)), thu1):
plt.text(a, b + 0.05, '%.2f' % b, ha='center', va='bottom', fontsize=9)
ax1.set_ylabel('Accuracy(%)')
ax1.set_xlabel('Window Size')
plt.xticks(range(len(name1)), name1, rotation=0)
plt.grid()
plt.legend()
ax2 = fig.add_subplot(122)
plt.plot(range(len(name2)), Nv2, 'bo--', label='NvGesture')
for a, b in zip(range(len(name2)), Nv2):
plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=9)
plt.plot(range(len(name2)), thu2, 'ro--', label='THU-READ')
for a, b in zip(range(len(name2)), thu2):
plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=9)
ax2.set_ylabel('Accuracy(%)')
ax2.set_xlabel('Sparse Rate')
plt.xticks(range(len(name2)), name2, rotation=0)
plt.grid()
plt.legend()
plt.show()
def Recoupling():
fontsize = 24
linewidth = 4
name = [20, 30, 40, 50, 60, 70, 80]
valueWO = [80.5, 82.7, 85.4, 84.8, 85.6, 85.0, 85.2]
valueW = [83.3, 84.1, 89.5, 87.0, 88.5, 87.2, 88.1]
fig = plt.figure(figsize=(18, 8))
ax = fig.add_subplot(121)
plt.plot(range(len(name)), valueWO, 'bo-', label='W/O Recoupling-NV', linewidth=linewidth)
# for a, b in zip(range(len(name)), valueWO):
# plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
plt.plot(range(len(name)), valueW, 'bo--', label='Recoupling-NV', linewidth=linewidth)
# for a, b in zip(range(len(name)), valueW):
# plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
valueWO = [54.2, 63.8, 68.7, 75.4, 79.2, 78.8, 79.1]
valueW = [54.6, 64.6, 69.2, 76.3, 81.7, 80.8, 80.4]
plt.plot(range(len(name)), valueWO, 'ro-', label='W/O Recoupling-THU', linewidth=linewidth)
# for a, b in zip(range(len(name)), valueWO):
# plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
plt.plot(range(len(name)), valueW, 'ro--', label='Recoupling-THU', linewidth=linewidth)
# for a, b in zip(range(len(name)), valueW):
# plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
ax.set_ylabel('Accuracy(%)',fontsize=fontsize, weight='bold')
ax.set_xlabel('(a) Epoch', fontsize=fontsize+1, weight='bold')
plt.xticks(range(len(name)), name, rotation=0, fontsize=fontsize, weight='bold')
plt.yticks(fontsize=fontsize, weight='bold')
plt.ylim(50, 90)
plt.grid()
# plt.title('(a)', fontsize=fontsize, weight='bold', y=-0.1)
plt.legend(fontsize=fontsize)
# plt.savefig(f're.pdf')
# fig = plt.figure(figsize=(11, 10))
ax1 = fig.add_subplot(122)
name = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
value = [87.4, 86.6, 89.5, 87.8, 87.2, 89.1, 88.5]
plt.plot(range(len(name)), value, 'bo-', label='Nv-Gesture', linewidth=linewidth)
for a, b in zip(range(len(name)), value):
plt.text(a, b + 0.02, '%.1f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
value = [78.8, 79.6, 79.6, 81.7, 78.8, 79.2, 77.9]
plt.plot(range(len(name)), value, 'ro-', label='THU-READ', linewidth=linewidth)
for a, b in zip(range(len(name)), value):
plt.text(a, b + 0.02, '%.1f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
ax1.set_ylabel('Accuracy(%)', fontsize=fontsize, weight='bold')
ax1.set_xlabel('(b) Temperature', fontsize=fontsize+1, weight='bold')
plt.xticks(range(len(name)), name, rotation=0, fontsize=fontsize, weight='bold')
plt.yticks(fontsize=fontsize, weight='bold')
# plt.ylim(40, 100)
plt.legend(fontsize=fontsize)
plt.grid()
# plt.title('(b)', fontsize=fontsize, weight='bold', y=-0.1)
plt.savefig(f'recoupling_temper.pdf', dpi=fig.dpi)
plt.show()
def Analysis(txt_file, types):
    pattern = re.compile(r"{} (\d+\.\d*)".format(types))  # [\d+\.\d]*
with open(txt_file, 'r') as f:
# data =[(lambda x: [x[f'{types}'], x['epoch']])(eval(fp)) for fp in f.readlines()]
data =[list(map(float, pattern.findall(fp))) for fp in f.readlines()]
data = list(filter(lambda x: len(x)>0, data))
data = np.array(data)
return data
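# Illustrative sketch (not part of the original script, never called):
# `Analysis` pulls every "<type> <float>" occurrence out of a log file, one row
# per matching line. The sample log line below is hypothetical.
def _demo_analysis():
    line = "Epoch 3 Acc 0.8750 Total_loss 1.2345"
    pattern = re.compile(r"{} (\d+\.\d*)".format('Acc'))
    return list(map(float, pattern.findall(line)))  # [0.875]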
def plot_func(datas, names, show_value=False, KAR=False, save_file_name='default'):
fontsize = 12
fig = plt.figure(dpi=200, figsize=(5,4))
# fig = plt.figure()
# ax = fig.add_subplot()
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
ax = axisartist.Subplot(fig, 111)
fig.add_axes(ax)
ax.axis["bottom"].set_axisline_style("->", size = 2.5)
ax.axis["left"].set_axisline_style("->", size = 2.5)
ax.axis["top"].set_visible(False)
ax.axis["right"].set_visible(False)
slopes, fit_values = [], []
markers = ['o', 'v', '*', '^']
for i, (data, name) in enumerate(zip(datas, names)):
data = data[:250]
if 'acc' in name:
data = [d[0]/100 for d in data]
name = 'Test-Acc'
if 'clean_loss' in name:
name = 'Easy-Loss'
if 'hard_loss' in name:
name = 'Hard-Loss'
if 'moderate_loss' in name:
name = 'Moderate-Loss'
# data = [d[0]/10 for d in data]
if 'clean_rate' in name:
name = 'DDP-e α=0.8 β=0.3'
data = [[d[0]+0.15] for d in data]
if 'hard_rate' in name:
name = 'DDP-h α=0.8 β=0.3'
data = [[d[0]-0.05] for d in data]
print(data)
# ax.plot(list(range(len(data)))[::15], data[::15], '-', label=name, linewidth=2.0, marker=markers[i])
# if i == 2: i+=1
# if name == 'Hard-Loss':
# ax.plot([75+d*5 for d in list(range(len(data)))], [d[0]-1.0 if i >= 16 else d[0] for i, d in enumerate(data)], '-', label=name, linewidth=3.0, color=colors[i])
# elif name == 'Moderate-Loss':
# ax.plot([75+d*5 for d in list(range(len(data)))], [d[0]-0.5 if i >= 16 else d[0] for i, d in enumerate(data)], '-', label=name, linewidth=3.0, color=colors[i])
# else:
# ax.plot([75+d*5 for d in list(range(len(data)))], [d[0]-0.1 if i >= 16 else d[0] for i, d in enumerate(data)], '-', label=name, linewidth=3.0, color=colors[i])
ax.plot([d for d in list(range(len(data)))], data, '-', label=name, linewidth=3.0, color=colors[i])
#α=0.9, β=0.4
# data =
if 'DDP' in name and KAR:
# slope = [float(d[0]) - for i, d in enumerate(data) if i > 0]
# slopes.append(np.array(slope))
# slope KAR
# slope = [float(d[0]) / i for i, d in enumerate(data) if i > 0]
# slopes.append(np.array(slope))
y = np.array([float(d[0]) for d in data])
x = np.array(list(range(len(data))))
from scipy.optimize import leastsq
from sympy import symbols, diff, Symbol, lambdify
def fit_func(p, x):
f = np.poly1d(p)
return f(x)
def residuals_func(p, y, x):
ret = fit_func(p, x) - y
return ret
p_init = np.random.randn(13)
plsq = leastsq(residuals_func, p_init, args=(y, x))
fit_value = fit_func(plsq[0], x)
fit_values.append(fit_value)
y = np.poly1d(plsq[0])
deriv_func = y.deriv()
slopes.append(abs(deriv_func(x)) * 20)
if len(slopes):
deriv_value = (slopes[0] + slopes[1]) / 2.
ax.plot(x[5:-5]+5, deriv_value[5:-5], '--', label='KAR', linewidth=2., color=colors[3])
ax.plot(x[::15], fit_values[0][::15], '--', label=' LSC', linewidth=1.5, color=colors[7], marker='o')
ax.plot(x[::15], fit_values[1][::15], '--', linewidth=1.5, color=colors[7], marker='o')
    plt.yticks(fontproperties='Times New Roman', size=15, weight='bold')  # set tick label size and bold weight
plt.xticks(fontproperties='Times New Roman', size=15)
# ax.set_ylabel('value')
ax.set_xlabel('Epoch', fontsize=18, fontweight='bold')
# x_names = torch.arange(0, 299, 10)
# print(x_names)
# plt.xticks(rotation=0, fontsize=fontsize, weight='bold')
# plt.grid()
# plt.axvline(0, color=colors[7], linestyle='--', label=None)
# plt.axvline(int(args.times[0][0]), color=colors[7], linestyle='--', label=None)
# plt.axvline(int(args.times[0][1]), color=colors[7], linestyle='--', label=None)
# plt.axhline(0.2751, color=colors[8], linestyle='--', label=None)
plt.legend(fontsize=12)
print('file name:', save_file_name)
# plt.savefig('./{}.png'.format(name), dpi=fig.dpi)
plt.savefig(f'./{save_file_name}.png', dpi=fig.dpi)
def plot_Curve(args):
# file_name = args.file_name
datas, names, conversion_ratio = [], [], []
print(args.file_name)
for file_name in args.file_name[0]:
types = args.types[0]
data_root = os.path.join('../out/', file_name, 'log.txt')
# datas, names, conversion_ratio = [], [], []
print(types)
for typ in types:
            pattern = re.compile(r"\"{}\": (\d+\.\d+)".format(typ))  # [\d+\.\d]*
with open(data_root, 'r') as f:
data =[list(map(float, pattern.findall(fp))) for fp in f.readlines()]
data = list(filter(lambda x: len(x)>0, data))
datas.append(data)
names.append(typ)
# flag = True
# for i, (_, d1, d2) in enumerate(zip(*datas)):
# # if flag:
# # same_v = d1[0]
# if round(d1[0], 2) == round(d2[0], 2):
# same_v = d1[0]
# print(same_v, i)
# input()
# flag=False
# conversion_ratio.append((abs(d1[0]-same_v))/(abs(d2[0] - same_v)))
# datas.append(conversion_ratio)
# names.append('conversion_ratio')
plot_func(datas=datas, names=names, KAR=True, save_file_name=args.save_name)
def PatchLevelErasing():
# softmax1 = [0.5125, 0.5001, 0.4968, 0.4835, 0.4654]
# clean_rate1 = [0.5597, 0.4962, 0.4091, 0.3527, 0.3128]
# hard_rate1 = [0.0860, 0.0927 , 0.1068, 0.1216, 0.1408]
# clean_rate1 = [ 0.4230, 0.4120, 0.4095, 0.4015, 0.3883 ]
# hard_rate1 = [0.1359, 0.1431, 0.1470, 0.1550, 0.1667]
#DeiT-S
# clean_rate1 = [0.4230, 0.3539, 0.3436, 0.3188, 0.2798]
# hard_rate1 = [0.1359, 0.1559, 0.1711, 0.1927, 0.2271]
# name1 = ['0%', '10%', '20%', '30%', '40%']
#Swin-T
clean_rate1 = [0.4370, 0.4258, 0.4046, 0.3845, 0.3557]
hard_rate1 = [0.1480, 0.1554, 0.1584, 0.1679, 0.1723]
name1 = ['0%', '5%', '10%', '15%', '20%']
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fig = plt.figure(dpi=200, figsize=(7,6))
ax = axisartist.Subplot(fig, 211)
fig.add_axes(ax)
ax.axis["bottom"].set_axisline_style("->", size = 2.5)
ax.axis["left"].set_axisline_style("->", size = 2.5)
ax.axis["top"].set_visible(False)
ax.axis["right"].set_visible(False)
# ax.plot(softmax1, 'r-', label='$p_k$', linewidth=2.0, marker='o')
ax.plot(clean_rate1, 'g-', label='DDP-e α=0.8 β=0.3', linewidth=2.0, marker='x')
ax.plot(hard_rate1, 'b-', label='DDP-h α=0.8 β=0.3', linewidth=2.0, marker='v')
ax.set_xticks(range(len(name1)))
ax.set_xticklabels(name1, rotation=0, fontsize='small')
ax.set_xlabel('PatchErasing', fontweight='bold')
# ax.set_ylabel('$p_k$', fontweight='bold')
plt.legend()
#====================================================================
# Deit-B
# clean_rate1 = [0.3527, 0.3457, 0.3405, 0.3346, 0.3285]
# clean_rate1 = [0.3527, 0.3257, 0.3005, 0.2846, 0.2485]
# hard_rate1 = [0.1216, 0.1216 , 0.1216, 0.1216, 0.1216]
# # softmax1 = [0.8652, 0.8776, 0.8739, 0.8658, 0.8553]
# softmax1 = [0.8652, 0.8576, 0.8039, 0.7658, 0.7053]
# name1 = ['30%+0', '30%+10', '30%+20', '30%+30', '30%+40']
# Deit-S
# clean_rate1 = [0.3188, 0.2777, 0.2500, 0.2300, 0.2091]
# hard_rate1 = [0.1927, 0.1902, 0.1927, 0.1927, 0.1927]
# softmax1 = [0.8627, 0.8519, 0.8318, 0.8098, 0.7818]
# name1 = ['30%+0', '30%+10', '30%+20', '30%+30', '30%+40']
# Swin-T
clean_rate1 = [0.3845, 0.3725, 0.3655, 0.3400, 0.3191]
hard_rate1 = [0.1679, 0.1682, 0.1727, 0.1797, 0.1805]
softmax1 = [0.8627, 0.8519, 0.8418, 0.8198, 0.7918]
name1 = ['15%+0', '15%+4', '15%+5', '15%+6', '15%+7']
ax = axisartist.Subplot(fig, 212)
fig.add_axes(ax)
# ax.axis["bottom"].set_axisline_style("->", size = 2.5)
# ax.axis["left"].set_axisline_style("->", size = 2.5)
# ax.axis["top"].set_visible(False)
# ax.axis["right"].set_visible(False)
l3, = ax.plot(softmax1, 'r-', label='$p_k$', linewidth=2.0, marker='o')
# ax.plot(clean_rate1, 'g-', label='DDP-e', linewidth=2.0, marker='x')
# ax.plot(hard_rate1, 'b-', label='DDP-h', linewidth=2.0, marker='v')
ax.set_xticks(range(len(name1)))
ax.set_xticklabels(name1, rotation=0, fontsize='small')
ax.set_xlabel('PatchErasing+AutoErasing', fontweight='bold')
ax.set_ylabel('$p_k$', fontweight='bold', fontsize='small')
ax2 = ax.twinx()
l1, = ax2.plot(clean_rate1, 'g-', label='DDP-e α=0.8 β=0.3', linewidth=2.0, marker='x')
# ax2.bar(range(len(clean_rate1)), clean_rate1, width=0.3, label='DDP-e', color=sns.xkcd_rgb["green"])
l2, = ax2.plot(hard_rate1, 'b-', label='DDP-h α=0.8 β=0.3', linewidth=2.0, marker='v')
ax2.set_ylabel('DDP', fontweight='bold')
plt.legend(handles=[l1, l2, l3])
# plt.tight_layout()
plt.savefig('./PatchLevelErasing-Swin-T.pdf', dpi=fig.dpi)
data2 = []
sys.exit(0)
def SwinShow():
# fp = open('/home/admin/workspace/Code/Swin-Transformer/output/swin_tiny_patch4_window7_224/baseline-DDP/swin_tiny_patch4_window7_224/default/log_rank0.txt', 'r')
fp = open('/home/admin/workspace/Code/Swin-Transformer/output/swin_base_patch4_window7_224/DDP3/swin_base_patch4_window7_224/default/log_rank0.txt', 'r')
clean_rates, hard_rates = [], []
for ln in fp:
# print(ln)
if '[1250/1251]' in ln and 'clean_rate' in ln:
print(re.findall(r"INFO Train: (\[\d+/\d+\])", ln))
try:
clean_rate = re.findall(r"clean_rate_6 (\d+\.\d+) \((\d+\.\d+)\)", ln)[-1]
hard_rate = re.findall(r"hard_rate_1 (\d+\.\d+) \((\d+\.\d+)\)", ln)[-1]
except:
continue
clean_rates.append(list(map(float, clean_rate)))
hard_rates.append(list(map(float, hard_rate)))
clean_rates, hard_rates = list(filter(lambda x: len(x)>0, [clean_rates, hard_rates]))
clean_rates, hard_rates = [[d[-1]] for d in clean_rates], [[d[-1]] for d in hard_rates]
datas = [clean_rates, hard_rates]
names = ['clean_rate', 'hard_rates']
plot_func(datas=datas, names=names, KAR=True)
sys.exit(0)
def ExperimentAnaylize(x, y, label, step=1, save_path='./test'):
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fig = plt.figure(dpi=200, figsize=(7,6))
ax = axisartist.Subplot(fig, 111)
fig.add_axes(ax)
# ax.axis["bottom"].set_axisline_style("->", size = 2.5)
# ax.axis["left"].set_axisline_style("->", size = 2.5)
# ax.axis["top"].set_visible(False)
# ax.axis["right"].set_visible(False)
ax2 = ax.twinx()
hand_labl = []
for i, (d, l) in enumerate(zip(x, label)):
d = d.tolist()
if 'acc' in l:
d = [l/100 for l in d]
elif 'L' in l or 'loss' in l:
l2, = ax2.plot(y[0::step], d[0::step], '--', label=l, linewidth=2.0, marker='o', color=colors[i])
hand_labl.append(l2)
continue
l1, = ax.plot(y[0::step], d[0::step], '--', label=l, linewidth=2.0, marker='x', color=colors[i])
hand_labl.append(l1)
# ax.set_xticklabels(y, rotation=0, fontsize='small')
ax.set_xlabel('Epoch', fontweight='bold')
ax.set_ylabel('DDP', fontweight='bold')
ax2.set_ylabel('Loss', fontweight='bold')
plt.legend(handles=hand_labl)
plt.savefig(f'{save_path}.png', dpi=fig.dpi)
def Txt2Analysis(txt_file, types):
datas = []
for typ in types[0]:
with open(txt_file, 'r') as f:
datas.append([eval(fp)[f'{typ}'] if typ in fp else 0.0 for fp in f.readlines()])
with open(txt_file, 'r') as f:
epochs = [eval(fp)['epoch'] for fp in f.readlines()]
datas = np.array(datas)
return datas, epochs, types[0]
if __name__ == '__main__':
# PatchLevelErasing()
# SwinShow()
# parser = argparse.ArgumentParser()
# parser.add_argument('--file-name', nargs='*', action='append', default=[])
# parser.add_argument('--types', nargs='*', action='append', default=[])
# parser.add_argument('--times', nargs='*', action='append', default=[])
# parser.add_argument('--save-name', default='')
# args = parser.parse_args()
# # plot_Curve(args)
# ExperimentAnaylize(*Txt2Analysis(args.file_name[0][0], args.types), step=10)
# sys.exit(0)
model = '{}'.format(sys.argv[1])
types = sys.argv[2]
name = [
['THUREAD1', '/home/admin/workspace/Code/MotionRGBD-PAMI/Checkpoints/THUREAD1/log20220608-120534.txt'],
['THUREAD1-mixup', '/home/admin/workspace/Code/MotionRGBD-PAMI/Checkpoints/THUREAD1-mixup/log20220609-014207.txt'],
]
data = []
for n, d in name:
try:
data.append([n, Analysis(d, types)])
except Exception as e:
print(e)
continue
plot_curve(datas=data, flag='{}_{}'.format(model, types))
| 20,739 | 39.826772 | 187 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/evaluate_metric.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
# -------------------
# import modules
# -------------------
import random, os
import numpy as np
import cv2
import heapq
import shutil
from textwrap import wrap
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimage
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, auc, roc_curve, roc_auc_score
import seaborn as sns
from torchvision import transforms
from PIL import Image
import torch
from torchvision.utils import save_image, make_grid
acc_figs = []
con_figs = []
# ---------------------------------------
# Plot Confusion Matrix
# ---------------------------------------
def plot_confusion_matrix(PREDICTIONS_PATH, grounds, preds, categories, idx, top=20):
print("--------------------------------------------")
print("Confusion Matrix")
print("--------------------------------------------")
super_category = str(idx)
num_cat = []
for ind, cat in enumerate(categories):
print("Class {0} : {1}".format(ind, cat))
num_cat.append(ind)
print()
numclass = len(num_cat)
cm = confusion_matrix(grounds, preds, labels=num_cat)
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot()
    sns.heatmap(cm, annot=False if numclass > 60 else True, fmt='g', ax=ax)  # annot=True to annotate cells, fmt='g' to disable scientific notation
# labels, title and ticks
ax.set_title('Confusion Matrix - ' + super_category, fontsize=20)
ax.set_xlabel('Predicted labels', fontsize=16)
ax.set_ylabel('True labels', fontsize=16)
ax.set_xticks(range(0,len(num_cat), 1))
ax.set_yticks(range(0,len(num_cat), 1))
ax.xaxis.set_ticklabels(num_cat)
ax.yaxis.set_ticklabels(num_cat)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
plt.pause(0.1)
fig.savefig(os.path.join(PREDICTIONS_PATH, "confusion_matrix"), dpi=fig.dpi)
# img = Image.open(os.path.join(PREDICTIONS_PATH, "confusion_matrix.png"))
# con_figs.append(img)
# if len(con_figs) > 1:
# con_figs[0].save(os.path.join(PREDICTIONS_PATH, "confusion_matrix.gif"), save_all=True, append_images=con_figs[1:], duration=1000, loop=0)
plt.close()
# -------------------------------------------------
# Plot Accuracy and Precision
# -------------------------------------------------
Accuracy = [(cm[i, i] / sum(cm[i, :])) * 100 if sum(cm[i, :]) != 0 else 0.000001 for i in range(cm.shape[0])]
Precision = [(cm[i, i] / sum(cm[:, i])) * 100 if sum(cm[:, i]) != 0 else 0.000001 for i in range(cm.shape[1])]
    fig = plt.figure(figsize=(min(numclass * 3, 300), 8))  # cap the figure width for large class counts
ax = fig.add_subplot()
bar_width = 0.4
x = np.arange(len(Accuracy))
b1 = ax.bar(x, Accuracy, width=bar_width, label='Accuracy', color=sns.xkcd_rgb["pale red"], tick_label=x)
ax2 = ax.twinx()
b2 = ax2.bar(x + bar_width, Precision, width=bar_width, label='Precision', color=sns.xkcd_rgb["denim blue"])
average_acc = sum(Accuracy)/len(Accuracy)
average_prec = sum(Precision)/len(Precision)
b3 = plt.hlines(y=average_acc, xmin=-bar_width, xmax=numclass - 1 + bar_width * 2, linewidth=2, linestyles='--', color='r',
label='Average Acc : %0.2f' % average_acc)
b4 = plt.hlines(y=average_prec, xmin=-bar_width, xmax=numclass - 1 + bar_width * 2, linewidth=2, linestyles='--', color='b',
label='Average Prec : %0.2f' % average_prec)
plt.xticks(np.arange(numclass) + bar_width / 2, np.arange(numclass))
# labels, title and ticks
ax.set_title('Accuracy and Precision Epoch #{}'.format(idx), fontsize=20)
ax.set_xlabel('labels', fontsize=16)
ax.set_ylabel('Acc(%)', fontsize=16)
ax2.set_ylabel('Prec(%)', fontsize=16)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
ax.tick_params(axis='y', colors=b1[0].get_facecolor())
ax2.tick_params(axis='y', colors=b2[0].get_facecolor())
plt.legend(handles=[b1, b2, b3, b4])
# fig.savefig(os.path.join(PREDICTIONS_PATH, "Accuracy-Precision_{}.png".format(idx)), dpi=fig.dpi)
fig.savefig(os.path.join(PREDICTIONS_PATH, "Accuracy-Precision.png"), dpi=fig.dpi)
# img = Image.open(os.path.join(PREDICTIONS_PATH, "Accuracy-Precision.png"))
# acc_figs.append(img)
# if len(acc_figs) > 1:
# acc_figs[0].save(os.path.join(PREDICTIONS_PATH, "Accuracy-Precision.gif"), save_all=True, append_images=acc_figs[1:], duration=1000, loop=0)
plt.close()
TopK_idx_acc = heapq.nlargest(top, range(len(Accuracy)), Accuracy.__getitem__)
TopK_idx_prec = heapq.nlargest(top, range(len(Precision)), Precision.__getitem__)
TopK_low_idx = heapq.nsmallest(top, range(len(Precision)), Precision.__getitem__)
print('=' * 80)
    print('Accuracy Top {0}: \n'.format(top))
print('| Class ID \t Accuracy(%) \t Precision(%) |')
for i in TopK_idx_acc:
print('| {0} \t {1} \t {2} |'.format(i, round(Accuracy[i], 2), round(Precision[i], 2)))
print('-' * 80)
    print('Precision Top {0}: \n'.format(top))
print('| Class ID \t Accuracy(%) \t Precision(%) |')
for i in TopK_idx_prec:
print('| {0} \t {1} \t {2} |'.format(i, round(Accuracy[i], 2), round(Precision[i], 2)))
print('=' * 80)
return TopK_low_idx
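# Small worked example (illustrative only, never called) of the per-class
# metrics computed above: row-normalizing the confusion matrix gives per-class
# accuracy (recall), while column-normalizing gives precision.
def _demo_per_class_metrics():
    cm = np.array([[8, 2],
                   [1, 9]])
    acc = [cm[i, i] / cm[i, :].sum() * 100 for i in range(2)]   # [80.0, 90.0]
    prec = [cm[i, i] / cm[:, i].sum() * 100 for i in range(2)]  # [~88.9, ~81.8]
    return acc, prec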
def EvaluateMetric(PREDICTIONS_PATH, train_results, idx):
TopK_low_idx = plot_confusion_matrix(PREDICTIONS_PATH, train_results['grounds'], train_results['preds'], train_results['categories'], idx)
| 5,573 | 39.100719 | 150 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/utils.py | '''
This file is modified from:
https://github.com/yuhuixu1993/PC-DARTS/blob/master/utils.py
'''
import os
import numpy as np
import torch
import torch.distributed as dist
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import OrderedDict
import random
from .build import SoftTargetCrossEntropy
#------------------------
# evaluation metrics
#------------------------
from sklearn.decomposition import PCA
from sklearn import manifold
import pandas as pd
import matplotlib.pyplot as plt # For graphics
import seaborn as sns
from torchvision.utils import save_image, make_grid
from PIL import Image
import cv2
from einops import rearrange, repeat
class ClassAcc():
def __init__(self, GESTURE_CLASSES):
self.class_acc = dict(zip([i for i in range(GESTURE_CLASSES)], [0]*GESTURE_CLASSES))
self.single_class_num = [0]*GESTURE_CLASSES
def update(self, logits, target):
pred = torch.argmax(logits, dim=1)
for p, t in zip(pred.cpu().numpy(), target.cpu().numpy()):
if p == t:
self.class_acc[t] += 1
self.single_class_num[t] += 1
def result(self):
return [round(v / (self.single_class_num[k]+0.000000001), 4) for k, v in self.class_acc.items()]
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
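# Illustrative usage (not part of the original utilities, never called):
# AverageMeter keeps a running average weighted by the count n, which is how
# the training scripts in this repo aggregate per-batch losses and accuracies.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(0.5, n=4)   # batch of 4 with mean value 0.5
    meter.update(1.0, n=2)   # batch of 2 with mean value 1.0
    return meter.avg         # (0.5*4 + 1.0*2) / 6 == 2/3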
def adjust_learning_rate(optimizer, step, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
df = 0.7
ds = 40000.0
lr = lr * np.power(df, step / ds)
# lr = args.lr * (0.1**(epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
# def accuracy(output, target, topk=(1,)):
# maxk = max(topk)
# batch_size = target.size(0)
# _, pred = output.topk(maxk, 1, True, True)
# pred = pred.t()
# correct = pred.eq(target.view(1, -1).expand_as(pred))
# res = []
# for k in topk:
# correct_k = correct[:k].view(-1).float().sum(0)
# res.append(correct_k.mul_(100.0/batch_size))
# return res
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = min(max(topk), output.size()[1])
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
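# Small sanity check (illustrative only, never called) for `accuracy`: with two
# samples and one correct top-1 prediction, top-1 accuracy is 50%.
def _demo_accuracy():
    logits = torch.tensor([[2.0, 1.0, 0.0],    # predicts class 0
                           [0.0, 1.0, 2.0]])   # predicts class 2
    target = torch.tensor([0, 1])              # second sample wrong at top-1
    top1, top2 = accuracy(logits, target, topk=(1, 2))
    return top1.item(), top2.item()            # (50.0, 100.0)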
def calculate_accuracy(outputs, targets):
with torch.no_grad():
batch_size = targets.size(0)
_, pred = outputs.topk(1, 1, True)
pred = pred.t()
correct = pred.eq(targets.view(1, -1))
correct_k = correct.view(-1).float().sum(0, keepdim=True)
#n_correct_elems = correct.float().sum().data[0]
# n_correct_elems = correct.float().sum().item()
# return n_correct_elems / batch_size
return correct_k.mul_(1.0 / batch_size)
def count_parameters_in_MB(model):
    return sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name)/1e6
def count_learnable_parameters_in_MB(model):
    return sum(np.prod(v.size()) for name, v in model.named_parameters() if v.requires_grad)/1e6
def save_checkpoint(state, is_best=False, save='./', filename='checkpoint.pth.tar'):
filename = os.path.join(save, filename)
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, 'model_best.pth.tar')
shutil.copyfile(filename, best_filename)
def load_checkpoint(model, model_path, optimizer=None, scheduler=None):
# checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage.cuda(4))
checkpoint = torch.load(model_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
if scheduler is not None:
scheduler.load_state_dict(checkpoint['scheduler'])
epoch = checkpoint['epoch']
bestacc = checkpoint['bestacc']
return epoch, bestacc
def load_pretrained_checkpoint(model, model_path):
# params = torch.load(model_path, map_location=lambda storage, loc: storage.cuda(local_rank))['model']
params = torch.load(model_path, map_location='cpu')['model']
new_state_dict = OrderedDict()
for k, v in params.items():
name = k[7:] if k[:7] == 'module.' else k
# if name not in ['dtn.mlp_head_small.2.bias', "dtn.mlp_head_small.2.weight",
# 'dtn.mlp_head_media.2.bias', "dtn.mlp_head_media.2.weight",
# 'dtn.mlp_head_large.2.bias', "dtn.mlp_head_large.2.weight"]:
# if v.shape == model.state_dict()[name].shape:
try:
if v.shape == model.state_dict()[name].shape and name not in ['dtn.multi_scale_transformers.0.3.2.weight', 'dtn.multi_scale_transformers.0.3.2.bias', 'dtn.multi_scale_transformers.1.3.2.weight', 'dtn.multi_scale_transformers.1.3.2.bias', 'dtn.multi_scale_transformers.2.3.2.weight', 'dtn.multi_scale_transformers.2.3.2.bias']:
new_state_dict[name] = v
except:
continue
ret = model.load_state_dict(new_state_dict, strict=False)
print('Missing keys: \n', ret.missing_keys)
# return model
def drop_path(x, drop_prob):
if drop_prob > 0.:
keep_prob = 1.-drop_prob
mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
x.div_(keep_prob)
x.mul_(mask)
return x
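# Illustrative note (not part of the original utilities, never called):
# `drop_path` implements stochastic depth, zeroing whole samples with
# probability drop_prob and scaling survivors by 1/keep_prob so the expected
# activation is unchanged. Assumes a CUDA device.
def _demo_drop_path():
    x = torch.ones(8, 3, 1, 1).cuda()
    y = drop_path(x, drop_prob=0.5)
    # surviving samples now hold 2.0 everywhere, dropped samples are all zeros
    return y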
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.mkdir(path)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
if not os.path.exists(os.path.join(path, 'scripts')):
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters)))
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
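# Illustrative sketch (never called): `cosine_scheduler` returns one value per
# iteration (epochs * niter_per_ep entries), warming up linearly before the
# cosine decay from base_value down to final_value.
def _demo_cosine_scheduler():
    sched = cosine_scheduler(base_value=0.1, final_value=0.001,
                             epochs=10, niter_per_ep=100, warmup_epochs=2)
    # sched[0] == 0.0 (warmup start), sched[199] == 0.1, sched[-1] ~= 0.001
    return sched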
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def uniform_sampling(clips_num, sn, random=True):
    # Split the frame indices [0, clips_num) into sn roughly equal segments and
    # pick one index per segment: a random index inside the segment when
    # random=True, otherwise the (rounded-down) segment mean.
    def pick_index(fallback, segment):
        if len(segment) == 0:
            return fallback
        return np.random.choice(segment) if random else int(np.mean(list(segment)))
    return [pick_index(clips_num * i / sn,
                       range(int(clips_num * i / sn),
                             max(int(clips_num * i / sn) + 1,
                                 int(clips_num * (i + 1) / sn))))
            for i in range(sn)]
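# Worked example (illustrative only, never called): sampling 8 frame indices
# from a 100-frame clip. With random=False the result is deterministic.
def _demo_uniform_sampling():
    idx = uniform_sampling(100, 8, random=False)
    # idx == [5, 18, 30, 43, 55, 68, 80, 93]; random=True draws one random
    # index inside each of the same 8 segments instead.
    return idx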
class DINOLoss(torch.nn.Module):
def __init__(self, args, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
center_momentum=0.9):
super().__init__()
self.args = args
self.student_temp = student_temp
self.center_momentum = center_momentum
self.ncrops = ncrops
self.register_buffer("center", torch.zeros(1, out_dim))
# we apply a warm up for the teacher temperature because
# a too high temperature makes the training instable at the beginning
self.teacher_temp_schedule = np.concatenate((
np.linspace(warmup_teacher_temp,
teacher_temp, warmup_teacher_temp_epochs),
np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp
))
self.CE = SoftTargetCrossEntropy()
def forward(self, student_output, teacher_output, epoch):
"""
Cross-entropy between softmax outputs of the teacher and student networks.
"""
[ori_logits, ori_xs, ori_xm, ori_xl], [ori_logits_flip, ori_xs_flip, ori_xm_flip, ori_xl_flip] = teacher_output
[color_logits, cxs, cxm, cxl], lam_mix = student_output
# teacher centering and sharpening
temp = self.teacher_temp_schedule[epoch]
ori_logits, ori_xs, ori_xm, ori_xl = map(lambda x: torch.softmax((x - self.center) / temp, dim=-1), [ori_logits, ori_xs, ori_xm, ori_xl])
ori_logits_flip, ori_xs_flip, ori_xm_flip, ori_xl_flip = map(lambda x: torch.softmax((x - self.center) / temp , dim=-1), [ori_logits_flip, ori_xs_flip, ori_xm_flip, ori_xl_flip])
logits_t = lam_mix * ori_logits + (1. - lam_mix) * ori_logits_flip
logits_xs_t = lam_mix * ori_xs + (1. - lam_mix) * ori_xs_flip
logits_xm_t = lam_mix * ori_xm + (1. - lam_mix) * ori_xm_flip
logits_xl_t = lam_mix * ori_xl + (1. - lam_mix) * ori_xl_flip
# color_logits, cxs, cxm, cxl = map(lambda x: torch.softmax(x / self.student_temp, dim=-1), [color_logits, cxs, cxm, cxl])
color_logits, cxs, cxm, cxl = map(lambda x: x / self.student_temp, [color_logits, cxs, cxm, cxl])
Total_loss = 0.0
CE = self.CE
if self.args.MultiLoss:
lamd1, lamd2, lamd3, lamd4 = map(float, self.args.loss_lamdb)
CE_loss = lamd1*CE(color_logits, logits_t) + lamd2*CE(cxs, logits_xs_t) + \
lamd3*CE(cxm, logits_xm_t) + lamd4*CE(cxl, logits_xl_t)
else:
CE_loss = CE(color_logits, logits_t)
Total_loss += CE_loss
self.update_center(logits_t)
return Total_loss
@torch.no_grad()
def update_center(self, teacher_output):
"""
Update center used for teacher output.
"""
batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
dist.all_reduce(batch_center)
batch_center = batch_center / (len(teacher_output) * dist.get_world_size())
# ema update
self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
x = x.long().view(-1, 1)
return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value)
def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'):
off_value = smoothing / num_classes
on_value = 1. - smoothing + off_value
y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)
return y1 * lam + y2 * (1. - lam)
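# Illustrative sketch (never called): `mixup_target` mixes each sample's
# one-hot target with that of the batch-reversed sample (standard mixup
# pairing), optionally applying label smoothing.
def _demo_mixup_target():
    target = torch.tensor([0, 2])
    soft = mixup_target(target, num_classes=3, lam=0.7, smoothing=0.0, device='cpu')
    # soft[0] == [0.7, 0.0, 0.3]; soft[1] == [0.3, 0.0, 0.7]
    return soft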
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
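# Illustrative sketch (never called): gather per-rank tensors into one tensor
# for global metrics, as the evaluation loops in this repo do with predictions.
# No gradients flow through all_gather, so this is bookkeeping only. Assumes an
# initialized process group.
def _demo_concat_all_gather(local_preds):
    # with world_size ranks each holding shape (B, ...), the result has shape
    # (world_size * B, ...), ordered by rank
    return concat_all_gather(local_preds)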
def Visfeature(args, model, inputs, v_path=None, weight_softmax=None, FusionNet=False):
# TSNE cluster
if FusionNet:
# pca_data = model.pca_data.detach().cpu()
# targets = model.target_data.cpu()
pca_data, targets = model.get_cluster_visualization()
tsne = manifold.TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
data = pd.DataFrame(pca_data.cpu().numpy())
data_pca = tsne.fit_transform(data)
data.insert(0, 'label', pd.DataFrame(targets.cpu().numpy()))
fig, ax = plt.subplots()
scatter = ax.scatter(data_pca[:, 0], data_pca[:, 1], c=data['label'], s=25, cmap='rainbow',
alpha=0.8, edgecolors='none')
legend1 = ax.legend(*scatter.legend_elements(fmt="{x:.0f}"),
loc="best", title="Feature Type")
ax.add_artist(legend1)
fig.savefig(args.save + '/cluster-result.png')
plt.close()
return
if args.visdom['enable']:
vis.featuremap('CNNVision',
torch.sum(make_grid(feature[0].detach(), nrow=int(feature[0].size(0) ** 0.5), padding=2), dim=0).flipud())
vis.featuremap('Attention Maps Similarity',
make_grid(feature[1], nrow=int(feature[1].detach().cpu().size(0) ** 0.5), padding=2)[0].flipud())
vis.featuremap('Enhancement Weights', feature[3].flipud())
else:
# fig = plt.figure()
# ax = fig.add_subplot()
# sns.heatmap(
# torch.sum(make_grid(feature[0].detach(), nrow=int(feature[0].size(0) ** 0.5), padding=2), dim=0).cpu().numpy(),
# annot=False, fmt='g', ax=ax)
# ax.set_title('CNNVision', fontsize=10)
# fig.savefig(os.path.join(args.save, 'CNNVision.jpg'), dpi=fig.dpi)
# plt.close()
# fig = plt.figure()
# ax = fig.add_subplot()
# sns.heatmap(make_grid(feature[1].detach(), nrow=int(feature[1].size(0) ** 0.5), padding=2)[0].cpu().numpy(), annot=False,
# fmt='g', ax=ax)
# ax.set_title('Attention Maps Similarity', fontsize=10)
# fig.savefig(os.path.join(args.save, 'AttMapSimilarity.jpg'), dpi=fig.dpi)
# plt.close()
fig = plt.figure()
ax = fig.add_subplot()
# visweight = model.visweight
feat, visweight = model.get_visualization()
sns.heatmap(visweight.detach().cpu().numpy(), annot=False, fmt='g', ax=ax)
ax.set_title('Enhancement Weights', fontsize=10)
fig.savefig(os.path.join(args.save, 'EnhancementWeights.jpg'), dpi=fig.dpi)
plt.close()
#------------------------------------------
# Spatial feature visualization
#------------------------------------------
headmap = feat.detach().cpu().numpy()
headmap = np.mean(headmap, axis=1)
headmap /= np.max(headmap)
headmap = torch.from_numpy(headmap)
b, c, t, h, w = inputs.shape
inputs = inputs.permute(2, 0, 1, 3, 4) #.view(t, b, c, h, w)
imgs = []
for img in inputs:
img = make_grid(img[:16], nrow=4, padding=2).unsqueeze(0)
imgs.append(img)
imgs = torch.cat(imgs)
        b, t, h, w = heatmap.shape
        heatmap = heatmap.permute(1, 0, 2, 3).unsqueeze(2) # .view(t, b, 1, h, w)
        heatmaps = []
        for heat in heatmap:
heat = make_grid(heat[:16], nrow=4, padding=2)[0].unsqueeze(0)
heatmaps.append(heat)
heatmaps = torch.cat(heatmaps)
# feat = model.feat
        # heatmap = feat[0,:].detach().cpu().numpy()
        # heatmap = np.mean(heatmap, axis=0)
        # heatmap /= np.max(heatmap) # torch.Size([64, 7, 7])
        # heatmap = torch.from_numpy(heatmap)
# img = inputs[0]
result_gif, result = [], []
for cam, mg in zip(heatmaps.unsqueeze(1), imgs.permute(0,2,3,1)):
# cam = torch.argmax(weight_softmax[0]).detach().cpu().dot(cam)
cam = cv2.resize(cam.squeeze().cpu().numpy(), (mg.shape[0]//2, mg.shape[1]//2))
cam = np.uint8(255 * cam)
cam = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
mg = np.uint8(mg.cpu().numpy() * 128 + 127.5)
mg = cv2.resize(mg, (mg.shape[0]//2, mg.shape[1]//2))
superimposed_img = cv2.addWeighted(mg, 0.4, cam, 0.6, 0)
result_gif.append(Image.fromarray(superimposed_img))
result.append(torch.from_numpy(superimposed_img).unsqueeze(0))
superimposed_imgs = torch.cat(result).permute(0, 3, 1, 2)
# save_image(superimposed_imgs, os.path.join(args.save, 'CAM-Features.png'), nrow=int(superimposed_imgs.size(0) ** 0.5), padding=2).permute(1,2,0)
superimposed_imgs = make_grid(superimposed_imgs, nrow=int(superimposed_imgs.size(0) ** 0.5), padding=2).permute(1,2,0)
cv2.imwrite(os.path.join(args.save, 'CAM-Features.png'), superimposed_imgs.numpy())
        # save augmented frames as gif
result_gif[0].save(os.path.join(args.save, 'CAM-Features.gif'), save_all=True, append_images=result_gif[1:], duration=100, loop=0)
if args.eval_only and args.visdom['enable']:
MHAS_s, MHAS_m, MHAS_l = feature[-2]
MHAS_s, MHAS_m, MHAS_l = MHAS_s.detach().cpu(), MHAS_m.detach().cpu(), MHAS_l.detach().cpu()
# Normalize
att_max, index_max = torch.max(MHAS_s.view(MHAS_s.size(0), -1), dim=-1)
att_min, index_min = torch.min(MHAS_s.view(MHAS_s.size(0), -1), dim=-1)
MHAS_s = (MHAS_s - att_min.view(-1, 1, 1))/(att_max.view(-1, 1, 1) - att_min.view(-1, 1, 1))
att_max, index_max = torch.max(MHAS_m.view(MHAS_m.size(0), -1), dim=-1)
att_min, index_min = torch.min(MHAS_m.view(MHAS_m.size(0), -1), dim=-1)
MHAS_m = (MHAS_m - att_min.view(-1, 1, 1))/(att_max.view(-1, 1, 1) - att_min.view(-1, 1, 1))
att_max, index_max = torch.max(MHAS_l.view(MHAS_l.size(0), -1), dim=-1)
att_min, index_min = torch.min(MHAS_l.view(MHAS_l.size(0), -1), dim=-1)
MHAS_l = (MHAS_l - att_min.view(-1, 1, 1))/(att_max.view(-1, 1, 1) - att_min.view(-1, 1, 1))
mhas_s = make_grid(MHAS_s.unsqueeze(1), nrow=int(MHAS_s.size(0) ** 0.5), padding=2)[0]
mhas_m = make_grid(MHAS_m.unsqueeze(1), nrow=int(MHAS_m.size(0) ** 0.5), padding=2)[0]
mhas_l = make_grid(MHAS_l.unsqueeze(1), nrow=int(MHAS_l.size(0) ** 0.5), padding=2)[0]
vis.featuremap('MHAS Map', mhas_l)
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(131)
sns.heatmap(mhas_s.squeeze(), annot=False, fmt='g', ax=ax, yticklabels=False)
ax.set_title('\nMHSA Small', fontsize=10)
ax = fig.add_subplot(132)
sns.heatmap(mhas_m.squeeze(), annot=False, fmt='g', ax=ax, yticklabels=False)
ax.set_title('\nMHSA Medium', fontsize=10)
ax = fig.add_subplot(133)
sns.heatmap(mhas_l.squeeze(), annot=False, fmt='g', ax=ax, yticklabels=False)
ax.set_title('\nMHSA Large', fontsize=10)
plt.suptitle('{}'.format(v_path[0].split('/')[-1]), fontsize=20)
fig.savefig('demo/{}-MHAS.jpg'.format(args.save.split('/')[-1]), dpi=fig.dpi)
plt.close()
def feature_embedding(x, target, embedding_dict):
temp_out, target_out = x
if temp_out is None:
x_gather = concat_all_gather(target_out)
target_gather = concat_all_gather(target.cuda())
for name, v in zip(target_gather, x_gather):
embedding_dict[name] = v
else:
class_embedding = torch.cat([target_out[i][-1].unsqueeze(-1) for i in range(len(target_out))], dim=-1).mean(-1)
embedding_gather = concat_all_gather(class_embedding)
target_gather = concat_all_gather(target.cuda())
# embedding_dict = OrderedDict()
for name, v in zip(target_gather, embedding_gather):
embedding_dict[name] = v
| 21,656 | 41.216374 | 338 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/mixup.py | """
This file is modified from:
https://github.com/rwightman/pytorch-image-models/blob/main/timm/data/mixup.py
Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2019, Ross Wightman
"""
import numpy as np
import torch
from einops import rearrange, repeat
import random
from .shufflemix import *
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
x = x.long().view(-1, 1)
return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value)
def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'):
off_value = smoothing / num_classes
on_value = 1. - smoothing + off_value
y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)
return y1 * lam + y2 * (1. - lam)
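# Illustrative sketch (not part of the original file): what mixup_target
# produces for num_classes=4, smoothing=0.1 -- the on/off values work out to
# 0.925/0.025, and each row mixes its own smoothed one-hot with the flipped
# batch's at weight lam, so rows stay normalized.
def _demo_mixup_target():
    target = torch.tensor([0, 2])
    soft = mixup_target(target, num_classes=4, lam=0.7, smoothing=0.1, device='cpu')
    assert torch.allclose(soft.sum(dim=1), torch.ones(2))  # rows still sum to 1
    return soft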
def rand_bbox(img_shape, lam, margin=0., count=None):
""" Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
return yl, yh, xl, xh
def rand_bbox_minmax(img_shape, minmax, count=None):
""" Min-Max CutMix bounding-box
Inspired by Darknet cutmix impl, generates a random rectangular bbox
based on min/max percent values applied to each dimension of the input image.
Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max.
Args:
img_shape (tuple): Image shape as tuple
minmax (tuple or list): Min and max bbox ratios (as percent of image size)
count (int): Number of bbox to generate
"""
assert len(minmax) == 2
img_h, img_w = img_shape[-2:]
cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count)
cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count)
yl = np.random.randint(0, img_h - cut_h, size=count)
xl = np.random.randint(0, img_w - cut_w, size=count)
yu = yl + cut_h
xu = xl + cut_w
return yl, yu, xl, xu
def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
""" Generate bbox and apply lambda correction.
"""
if ratio_minmax is not None:
yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
else:
yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
if correct_lam or ratio_minmax is not None:
bbox_area = (yu - yl) * (xu - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return (yl, yu, xl, xu), lam
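# Illustrative sketch (not part of the original file): drawing a CutMix box
# and reading back the area-corrected lambda. With correct_lam=True the
# returned lam is recomputed from the clipped box, so it can differ from the
# requested value.
def _demo_cutmix_bbox(img_shape=(3, 224, 224)):
    (yl, yu, xl, xu), lam = cutmix_bbox_and_lam(img_shape, lam=0.6, correct_lam=True)
    # lam is the fraction of the image kept from the original sample
    assert abs(lam - (1. - (yu - yl) * (xu - xl) / float(224 * 224))) < 1e-6
    return (yl, yu, xl, xu), lam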
class Mixup:
""" Mixup/Cutmix that applies different params to each element or whole batch
Args:
mixup_alpha (float): mixup alpha value, mixup is active if > 0.
cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
prob (float): probability of applying mixup or cutmix per batch or element
switch_prob (float): probability of switching to cutmix instead of mixup when both are active
mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)
correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
label_smoothing (float): apply label smoothing to the mixed target tensor
num_classes (int): number of classes for target
"""
def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000, args=None):
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.cutmix_minmax = cutmix_minmax
if self.cutmix_minmax is not None:
assert len(self.cutmix_minmax) == 2
# force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
self.cutmix_alpha = 1.0
self.mix_prob = prob
self.switch_prob = switch_prob
self.label_smoothing = label_smoothing
self.num_classes = num_classes
self.mode = mode
self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix
        self.mixup_enabled = True  # set to False to disable mixing (intended to be set by the train loop)
        self.beta_range = torch.linspace(0.0, 0.5, steps=args.epochs)
self.args = args
def _params_per_elem(self, batch_size):
lam = np.ones(batch_size, dtype=np.float32)
        use_cutmix = np.zeros(batch_size, dtype=bool)  # np.bool was removed in NumPy >= 1.24
if self.mixup_enabled:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand(batch_size) < self.switch_prob
lam_mix = np.where(
use_cutmix,
np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
elif self.cutmix_alpha > 0.:
                use_cutmix = np.ones(batch_size, dtype=bool)
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
return lam, use_cutmix
def _params_per_batch(self):
lam = 1.
use_cutmix = False
if self.mixup_enabled and np.random.rand() < self.mix_prob:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand() < self.switch_prob
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.cutmix_alpha > 0.:
use_cutmix = True
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = float(lam_mix)
if self.args.mixup_dynamic:
            lam = np.random.uniform(0, float(self.beta_range[self.args.epoch]))
return lam, use_cutmix
def _mix_elem(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_pair(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
if np.random.rand() < self.args.smprob:
lam = np.random.beta(self.args.shufflemix, self.args.shufflemix)
if self.args.smixmode == 'sm':
replace_idx = random.sample(range(0, x.size(2)), round((1. - lam)*x.size(2)))
x[i, :, replace_idx, :, :] = x_orig[j, :, replace_idx, :, :]
flip_idx = [i for i in range(x.size(2)) if i not in replace_idx]
x[j, :, flip_idx, :, :] = x_orig[i, :, flip_idx, :, :]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
x[j] = x[j] * lam + x_orig[i] * (1 - lam)
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_batch(self, x, target):
"""
:param x: x.shape = (batch_size, 3, seq_len, 224, 224)
"""
lam, use_cutmix = self._params_per_batch()
if lam == 1.:
return 1.
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[:, :, :, yl:yh, xl:xh] = x.flip(0)[:, :, :, yl:yh, xl:xh]
else:
if self.args.MixIntra:
lam = MixIntra(x, lam, target, replace_prob=self.args.replace_prob)
return lam
if self.args.tempMix:
lam = TempMix(x, self.args.mixup)
return lam
if np.random.rand() < self.args.smprob:
lam = self._shufflemix_batch(x)
else:
x_flipped = x.flip(0).mul_(1. - lam)
x.mul_(lam).add_(x_flipped)
return lam
def _shufflemix_batch(self, x):
lam = np.random.beta(self.args.shufflemix, self.args.shufflemix)
if self.args.smixmode == 'sm':
ShuffleMix(x, lam)
elif self.args.smixmode == 'sm_v1':
ShuffleMix_v1(x, lam)
elif self.args.smixmode == 'sm_v2':
ShuffleMix_v2(x, lam)
elif self.args.smixmode == 'sm_v3':
ShuffleMix_v3(x, lam)
elif self.args.smixmode == 'mu_sm':
Mixup_ShuffleMix(x, lam)
else:
raise Exception (f'No ShuffleMix strategy {self.args.smixmode} be found !')
return lam
def __call__(self, x, target):
assert len(x) % 2 == 0, 'Batch size should be even when using this'
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.mode == 'pair':
lam = self._mix_pair(x)
else:
lam = self._mix_batch(x, target)
if self.args.tempMix:
targets = []
for l in lam:
tar = mixup_target(target, self.num_classes, l, self.label_smoothing, x.device)
targets.append(tar.unsqueeze(-1))
target = torch.cat(targets, dim=-1).transpose(1,2)
target = target.mean(1)
else:
target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)
return x, target
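# Illustrative usage sketch (not part of the original file). Mixup above
# expects 5D video batches (B, C, T, H, W) in 'batch' mode plus an `args`
# namespace; the fields set below are assumptions mirroring what
# _params_per_batch / _mix_batch actually read.
def _demo_mixup_call():
    from types import SimpleNamespace
    args = SimpleNamespace(epochs=10, mixup_dynamic=False, MixIntra=False,
                           tempMix=False, smprob=0.0)
    mixer = Mixup(mixup_alpha=0.8, cutmix_alpha=0., mode='batch',
                  label_smoothing=0.1, num_classes=5, args=args)
    x = torch.randn(4, 3, 16, 32, 32)  # an even batch size is required
    y = torch.randint(0, 5, (4,))
    x_mixed, y_soft = mixer(x, y)      # y_soft: (4, 5) soft labels
    return x_mixed, y_soft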
class FastCollateMixup(Mixup):
""" Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch
A Mixup impl that's performed while collating the batches.
"""
def _mix_elem_collate(self, output, batch, half=False):
batch_size = len(batch)
num_elem = batch_size // 2 if half else batch_size
assert len(output) == num_elem
lam_batch, use_cutmix = self._params_per_elem(num_elem)
for i in range(num_elem):
j = batch_size - i - 1
lam = lam_batch[i]
mixed = batch[i][0]
if lam != 1.:
if use_cutmix[i]:
if not half:
mixed = mixed.copy()
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
np.rint(mixed, out=mixed)
output[i] += torch.from_numpy(mixed.astype(np.uint8))
if half:
lam_batch = np.concatenate((lam_batch, np.ones(num_elem)))
return torch.tensor(lam_batch).unsqueeze(1)
def _mix_pair_collate(self, output, batch):
batch_size = len(batch)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
mixed_i = batch[i][0]
mixed_j = batch[j][0]
assert 0 <= lam <= 1.0
if lam < 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
patch_i = mixed_i[:, yl:yh, xl:xh].copy()
mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh]
mixed_j[:, yl:yh, xl:xh] = patch_i
lam_batch[i] = lam
else:
mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam)
mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam)
mixed_i = mixed_temp
np.rint(mixed_j, out=mixed_j)
np.rint(mixed_i, out=mixed_i)
output[i] += torch.from_numpy(mixed_i.astype(np.uint8))
output[j] += torch.from_numpy(mixed_j.astype(np.uint8))
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch).unsqueeze(1)
def _mix_batch_collate(self, output, batch):
batch_size = len(batch)
lam, use_cutmix = self._params_per_batch()
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
for i in range(batch_size):
j = batch_size - i - 1
mixed = batch[i][0]
if lam != 1.:
if use_cutmix:
mixed = mixed.copy() # don't want to modify the original while iterating
mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
else:
mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
np.rint(mixed, out=mixed)
output[i] += torch.from_numpy(mixed.astype(np.uint8))
return lam
def __call__(self, batch, _=None):
batch_size = len(batch)
assert batch_size % 2 == 0, 'Batch size should be even when using this'
half = 'half' in self.mode
if half:
batch_size //= 2
output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
if self.mode == 'elem' or self.mode == 'half':
lam = self._mix_elem_collate(output, batch, half=half)
elif self.mode == 'pair':
lam = self._mix_pair_collate(output, batch)
else:
lam = self._mix_batch_collate(output, batch)
target = torch.tensor([b[1] for b in batch], dtype=torch.int64)
target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu')
target = target[:batch_size]
return output, target
| 17,254 | 44.890957 | 120 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/shufflemix.py | import numpy as np
import torch
import random
def Vmixup(x, lam):
x_flipped = x.flip(0).mul_(1. - lam)
x.mul_(lam).add_(x_flipped)
def ShuffleMix(x, lam):
x_flipped = x.flip(0)
replace_idx = random.sample(range(0, x.size(2)), round((1. - lam)*x.size(2)))
x[:, :, replace_idx, :, :] = x_flipped[:, :, replace_idx, :, :]
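# Illustrative sketch (not part of the original file): ShuffleMix replaces a
# random subset of time steps with the batch-flipped clip, so roughly (1 - lam)
# of the frames per sample come from its paired sample.
def _demo_shufflemix():
    x = torch.arange(8, dtype=torch.float32).view(2, 1, 4, 1, 1)  # (B, C, T, H, W)
    before = x.clone()
    ShuffleMix(x, lam=0.5)  # swaps round(0.5 * 4) = 2 of the 4 time steps
    changed = (x != before).float().sum(dim=(0, 1, 3, 4)) > 0
    return changed  # boolean mask over the T axis; exactly two entries are True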
def ShuffleMix_v1(x, lam):
x_flipped = x.flip(0)
length = x.size(2)
start = random.sample([0, 1], 1)[0]
a = torch.arange(start, length, step=2)
v_len = int(length*(1. - lam))
replace_num = min(v_len, length-v_len)
if len(a)-replace_num:
b = random.sample(range(0, len(a)-replace_num), 1)[0]
else:
b = 0
replace_idx = a[b:b+replace_num]
if v_len <= length-v_len:
x[:, :, replace_idx, :, :] = x_flipped[:, :, replace_idx, :, :]
else:
x_flipped[:, :, replace_idx, :, :] = x[:, :, replace_idx, :, :]
replace_idx = torch.arange(0, x.size(2), step=1)
x[:, :, replace_idx, :, :] = x_flipped[:, :, replace_idx, :, :]
def ShuffleMix_v2(x, lam):
x_flipped = x.flip(0)
# replace_idx = random.sample(range(0, x.size(2)), int((1. - lam)*x.size(2)))
length = max(1, int((1. - lam)*x.size(2)))
# uni_idx = uniform_sampling(x.size(2), length, random=True)
if x.size(2) != length:
start = random.sample(range(0, x.size(2) - length), 1)[0]
replace_idx = torch.arange(start, start+length, step=1)
# x[:, :, replace_idx, :, :] = x[:, :, replace_idx, :, :].mul_(lam).add_(x_flipped[:, :, uni_idx, :, :].mul_(1. - lam))
# x[:, :, replace_idx, :, :] = x_flipped[:, :, replace_idx, :, :]
x[:, :, -len(replace_idx):] = x_flipped[:, :, replace_idx, :, :]
else:
x = x_flipped
def ShuffleMix_v3(x, lam):
x_flipped = x.flip(0)
length = int((1. - lam)*x_flipped.size(2))
# if length:
# x[:, :, -length:, :, :] = x_flipped[:, :, :length, :, :]
uni_idx = uniform_sampling(x_flipped.size(2), length, random=True)
x1 = x_flipped[:, :, uni_idx, :, :]
length = x.size(2) - length
uni_idx = uniform_sampling(x.size(2), length, random=True)
x2 = x[:, :, uni_idx, :, :]
x_cat = torch.cat((x2, x1), dim=2)
replace_idx = torch.arange(0, x.size(2), step=1)
x[:, :, replace_idx, :, :] = x_cat[:, :, replace_idx, :, :]
# uni_idx = uniform_sampling(x_flipped.size(2), length, random=True)
# x1 = x_flipped[:, :, uni_idx, :, :]
# length = x.size(2) - length
# uni_idx = uniform_sampling(x.size(2), length, random=True)
# x2 = x[:, :, uni_idx, :, :]
# start = random.sample([0, length-1], 1)[0]
# if start == 0:
# x_cat = torch.cat((x1, x2), dim=2)
# else:
# x_cat = torch.cat((x2, x1), dim=2)
# # x_cat = torch.cat((x2, x1), dim=2)
# assert x.size(2) == x_flipped.size(2), f'x size {x.size(2)} must match with raw size {x_flipped.size(2)}'
# replace_idx = torch.arange(0, x.size(2), step=1)
# x[:, :, replace_idx, :, :] = x_cat[:, :, replace_idx, :, :]
def Mixup_ShuffleMix(x, lam):
x_flipped = x.flip(0).mul_(1. - lam)
x.mul_(lam).add_(x_flipped)
replace_idx = random.sample(range(0, x.size(2)), int((1. - lam)*x.size(2)))
x[:, :, replace_idx, :, :] = x_flipped[:, :, replace_idx, :, :]
def TempMix(x, lam):
lam = np.random.beta(lam, lam, size=x.size(2))
lam = torch.from_numpy(lam)[None, None, :, None, None].cuda()
x_flipped = x.flip(0).mul_(1. - lam)
x.mul_(lam).add_(x_flipped)
return lam.squeeze()
def ShuffleMix_plus(x, lam, smprob):
# x: torch.Size([16, 3, 16, 224, 224])
lam = torch.tensor(lam).view(-1).expand(x.size(2)).clone().cuda()
lam_flipped = 1.0 - lam
replace_idx = random.sample(range(0, x.size(2)), round(smprob*x.size(2)))
lam[replace_idx] = lam_flipped[replace_idx]
lam_flipped = 1.0 - lam
x_flipped = x.flip(0).mul_(lam_flipped.view(1, 1, -1, 1, 1))
x.mul_(lam.view(1, 1, -1, 1, 1)).add_(x_flipped)
return lam.mean()
def MixIntra(x, lam, target, replace_prob):
# gather from all gpus
batch_size_this = x.shape[0]
# x_gather = concat_all_gather(x)
# target_gather = concat_all_gather(target)
# batch_size_all = x_gather.shape[0]
# num_gpus = batch_size_all // batch_size_this
    labels = np.unique(target.cpu().numpy())
    label_dict = dict([(t, []) for t in labels])
for idx, t in enumerate(target.tolist()):
label_dict[t].append(idx)
indx_list = [random.choice(label_dict[t]) for t in target.tolist()]
x_intra = x[indx_list]
replace_idx = random.sample(range(0, x.size(2)), round(replace_prob*x.size(2)))
x[:, :, replace_idx, :, :] = x_intra[:, :, replace_idx, :, :]
# x_flipped = x_gather.flip(0).mul_(1. - lam)
# x_gather.mul_(lam).add_(x_flipped)
# random shuffle index
# idx_shuffle = torch.arange(batch_size_all).cuda()
# # broadcast tensor to all gpus
# torch.distributed.broadcast(idx_shuffle, src=0)
# # shuffled index for this gpu
# gpu_idx = torch.distributed.get_rank()
# idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
# x[torch.arange(batch_size_this)] = x_gather[idx_this]
return 0.9
| 5,200 | 35.626761 | 127 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/build.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
import math
import torch.nn.functional as F
# from .utils import cosine_scheduler
import matplotlib.pyplot as plt
import numpy as np
class LabelSmoothingCrossEntropy(torch.nn.Module):
def __init__(self, smoothing: float = 0.1,
reduction="mean", weight=None):
super(LabelSmoothingCrossEntropy, self).__init__()
self.smoothing = smoothing
self.reduction = reduction
self.weight = weight
    def reduce_loss(self, loss):
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
def linear_combination(self, x, y):
return self.smoothing * x + (1 - self.smoothing) * y
def forward(self, preds, target):
assert 0 <= self.smoothing < 1
if self.weight is not None:
self.weight = self.weight.to(preds.device)
n = preds.size(-1)
log_preds = F.log_softmax(preds, dim=-1)
loss = self.reduce_loss(-log_preds.sum(dim=-1))
nll = F.nll_loss(
log_preds, target, reduction=self.reduction, weight=self.weight
)
return self.linear_combination(loss / n, nll)
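# Illustrative sketch (not part of the original file): with smoothing=0 the
# criterion above reduces to plain cross entropy, which gives a quick sanity
# check of the linear combination in forward().
def _demo_label_smoothing():
    torch.manual_seed(0)
    preds, target = torch.randn(8, 10), torch.randint(0, 10, (8,))
    lsce = LabelSmoothingCrossEntropy(smoothing=0.0)
    assert torch.allclose(lsce(preds, target), F.cross_entropy(preds, target), atol=1e-6)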
def build_optim(args, model):
if args.optim == 'SGD':
optimizer = torch.optim.SGD(
model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay
)
elif args.optim == 'Adam':
optimizer = torch.optim.Adam(
model.parameters(),
lr=args.learning_rate
)
elif args.optim == 'AdamW':
optimizer = torch.optim.AdamW(
model.parameters(),
lr=args.learning_rate
)
    else:
        raise NameError('build optimizer error: unsupported optim {}'.format(args.optim))
    return optimizer
#
def build_scheduler(args, optimizer):
if args.scheduler['name'] == 'cosin':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs-args.scheduler['warm_up_epochs']), eta_min=args.learning_rate_min)
elif args.scheduler['name'] == 'ReduceLR':
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1,
patience=args.scheduler['patience'], verbose=True,
threshold=0.0001,
threshold_mode='rel', cooldown=3, min_lr=0.00001,
eps=1e-08)
else:
raise NameError('build scheduler error!')
if args.scheduler['warm_up_epochs'] > 0:
warmup_schedule = lambda epoch: np.linspace(1e-8, args.learning_rate, args.scheduler['warm_up_epochs'])[epoch]
return (scheduler, warmup_schedule)
return (scheduler,)
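# Illustrative usage sketch (not part of the original file): consuming the
# (scheduler, warmup) tuple returned by build_scheduler in a training loop.
# The args fields mirror what build_optim/build_scheduler read; with
# name='ReduceLR' you would call schedulers[0].step(val_metric) instead.
def _demo_scheduler_usage(model, args):
    optimizer = build_optim(args, model)
    schedulers = build_scheduler(args, optimizer)
    for epoch in range(args.epochs):
        if len(schedulers) == 2 and epoch < args.scheduler['warm_up_epochs']:
            # linear warmup: write the lr into every param group directly
            for group in optimizer.param_groups:
                group['lr'] = schedulers[1](epoch)
        else:
            schedulers[0].step()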
def build_loss(args):
loss_Function=dict(
CE_smooth = LabelSmoothingCrossEntropy(),
CE = torch.nn.CrossEntropyLoss(),
MSE = torch.nn.MSELoss(),
BCE = torch.nn.BCELoss(),
SoftCE = SoftTargetCrossEntropy(),
TempLoss = TempoLoss(),
)
if args.loss['name'] == 'CE' and args.loss['labelsmooth']:
return loss_Function['CE_smooth']
return loss_Function[args.loss['name']]
class SoftTargetCrossEntropy(torch.nn.Module):
def __init__(self, args=None):
self.args = args
super(SoftTargetCrossEntropy, self).__init__()
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
# for ii, t in enumerate(target):
# v, l = torch.topk(t, k=2, dim=-1)
# for i in l:
# if i in [0,1,3,8,15,16,17,18]:
# target[ii, i] *= 1.5
loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
return loss.mean()
class TempoLoss(torch.nn.Module):
def __init__(self):
super(TempoLoss, self).__init__()
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
'''
x: troch.size([b, t, l])
'''
loss = 0.0
for i in range(x.size(1)):
inp, tar = x[:, i, :], target[:, i, :]
loss += torch.sum(-tar * F.log_softmax(inp, dim=-1), dim=-1).mean()
return loss / x.size(1)
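# Illustrative sketch (not part of the original file): TempoLoss averages a
# soft-target cross entropy over the temporal axis of (B, T, L) logits.
def _demo_tempo_loss():
    x = torch.randn(2, 4, 10)                        # (batch, time, classes)
    target = torch.softmax(torch.randn(2, 4, 10), dim=-1)
    return TempoLoss()(x, target)                    # scalar tensor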
class RCM_loss(torch.nn.Module):
def __init__(self, args, model: torch.nn.Module):
super(RCM_loss, self).__init__()
self.args = args
def forward(self, x):
temp_out, target_out = x
distill_loss = torch.tensor(0.0).cuda()
for i, temp_w in enumerate(temp_out):
# target_weight = self.dtn.multi_scale_transformers[i+3].transformer_enc_media.layers[-1][0].fn.scores
# target_weight = target_weight.mean(1).mean(1)[:, 1:]
# target_weight = target_weight.mean(1)[:, 0, 1:]
# target_weight = self.dtn.multi_scale_transformers[i].class_embedding
# target_weight = self.dtn.multi_scale_transformers[1][2].layers[i][0].fn.scores
# # # target_weight = target_weight.mean(1).mean(1)
# target_weight = target_weight.mean(1).mean(1)[:, 1:]
target_weight = torch.zeros_like(target_out[0][0])
for j in range(len(target_out)):
target_weight += target_out[j][-(len(temp_out)-i)]
T = self.args.temper
# distill_loss += F.kl_div(F.log_softmax(temp_w / T, dim=-1),
# F.log_softmax(target_weight.detach() / T, dim=-1),
# reduction='sum')
# # distill_loss += self.MSE(temp_w, F.softmax(target_weight.detach(), dim=-1))
target_weight = torch.softmax(target_weight / T, dim=-1)
distill_loss += torch.sum(-target_weight * F.log_softmax(temp_w / T, dim=-1), dim=-1).mean()
return distill_loss/len(temp_out)
| 5,901 | 37.575163 | 118 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/base.py | '''
This file is modified from:
https://github.com/zhoubenjia/RAAR3DNet/blob/master/Network_Train/lib/datasets/base.py
'''
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, set_image_backend
import torch.nn.functional as F
from PIL import Image
from PIL import ImageFilter, ImageOps
import os, glob
import math, random
import numpy as np
import logging
from tqdm import tqdm as tqdm
import pandas as pd
from multiprocessing import Pool, cpu_count
import multiprocessing as mp
import cv2
import json
from scipy.ndimage.filters import gaussian_filter
from timm.data.random_erasing import RandomErasing
# from vidaug import augmentors as va
from .augmentation import *
# import functools
import matplotlib.pyplot as plt # For graphics
from torchvision.utils import save_image, make_grid
np.random.seed(123)
class Normaliztion(object):
"""
same as mxnet, normalize into [-1, 1]
image = (image - 127.5)/128
"""
def __call__(self, Image):
new_video_x = (Image - 127.5) / 128
return new_video_x
class Datasets(Dataset):
global kpt_dict
def __init__(self, args, ground_truth, modality, phase='train'):
self.dataset_root = args.data
self.sample_duration = args.sample_duration
self.sample_size = args.sample_size
self.phase = phase
self.typ = modality
self.args = args
self._w = args.w
if phase == 'train':
self.transform = transforms.Compose([
Normaliztion(),
transforms.ToTensor(),
RandomErasing(args.reprob, mode=args.remode, max_count=args.recount, num_splits=0, device='cpu')
])
else:
self.transform = transforms.Compose([Normaliztion(), transforms.ToTensor()])
self.inputs, self.video_apth = self.prepropose(ground_truth)
def prepropose(self, ground_truth, min_frames=16):
def get_data_list_and_label(data_df):
return [(lambda arr: (arr[0], int(arr[1]), int(arr[2])))(i[:-1].split(' '))
for i in open(data_df).readlines()]
self.inputs = list(filter(lambda x: x[1] > min_frames, get_data_list_and_label(ground_truth)))
self.inputs = list(self.inputs)
self.batch_check()
self.video_apth = dict([(self.inputs[i][0], i) for i in range(len(self.inputs))])
return self.inputs, self.video_apth
def batch_check(self):
if self.phase == 'train':
while len(self.inputs) % (self.args.batch_size * self.args.nprocs) != 0:
sample = random.choice(self.inputs)
self.inputs.append(sample)
else:
while len(self.inputs) % (self.args.test_batch_size * self.args.nprocs) != 0:
sample = random.choice(self.inputs)
self.inputs.append(sample)
def __str__(self):
if self.phase == 'train':
frames = [n[1] for n in self.inputs]
return 'Training Data Size is: {} \n'.format(len(self.inputs)) + 'Average Train Data frames are: {}, max frames: {}, min frames: {}\n'.format(sum(frames)//len(self.inputs), max(frames), min(frames))
else:
frames = [n[1] for n in self.inputs]
return 'Validation Data Size is: {} \n'.format(len(self.inputs)) + 'Average validation Data frames are: {}, max frames: {}, min frames: {}\n'.format(
sum(frames) // len(self.inputs), max(frames), min(frames))
def transform_params(self, resize=(320, 240), crop_size=224, flip=0.5):
if self.phase == 'train':
left, top = np.random.randint(0, resize[0] - crop_size), np.random.randint(0, resize[1] - crop_size)
is_flip = True if np.random.uniform(0, 1) < flip else False
else:
left, top = (resize[0] - crop_size) // 2, (resize[1] - crop_size) // 2
is_flip = False
return (left, top, left + crop_size, top + crop_size), is_flip
def rotate(self, image, angle, center=None, scale=1.0):
(h, w) = image.shape[:2]
if center is None:
center = (w / 2, h / 2)
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
def get_path(self, imgs_path, a):
return os.path.join(imgs_path, "%06d.jpg" % a)
def depthProposess(self, img):
h2, w2 = img.shape
mask = img.copy()
mask = cv2.erode(mask, np.ones((3, 3), np.uint8))
mask = cv2.dilate(mask, np.ones((10, 10), np.uint8))
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Keep only contours with a sufficiently large area
Idx = []
for i in range(len(contours)):
Area = cv2.contourArea(contours[i])
if Area > 500:
Idx.append(i)
centers = []
for i in Idx:
rect = cv2.minAreaRect(contours[i])
center, (h, w), degree = rect
centers.append(center)
finall_center = np.int0(np.array(centers))
c_x = min(finall_center[:, 0])
c_y = min(finall_center[:, 1])
center = (c_x, c_y)
crop_x, crop_y = 320, 240
left = center[0] - crop_x // 2 if center[0] - crop_x // 2 > 0 else 0
top = center[1] - crop_y // 2 if center[1] - crop_y // 2 > 0 else 0
crop_w = left + crop_x if left + crop_x < w2 else w2
crop_h = top + crop_y if top + crop_y < h2 else h2
rect = (left, top, crop_w, crop_h)
image = Image.fromarray(img)
image = image.crop(rect)
return image
def image_propose(self, data_path, sl):
sample_size = self.sample_size
resize = eval(self.args.resize)
crop_rect, is_flip = self.transform_params(resize=resize, crop_size=self.args.crop_size, flip=self.args.flip)
if np.random.uniform(0, 1) < self.args.rotated and self.phase == 'train':
r, l = eval(self.args.angle)
rotated = np.random.randint(r, l)
else:
rotated = 0
sometimes = lambda aug: Sometimes(0.5, aug) # Used to apply augmentor with 50% probability
self.seq_aug = Sequential([
RandomResize(self.args.resize_rate),
RandomCrop(resize),
# RandomTranslate(self.args.translate, self.args.translate),
# sometimes(Salt()),
# sometimes(GaussianBlur()),
])
def transform(img):
img = np.asarray(img)
if img.shape[-1] != 3:
img = np.uint8(255 * img)
img = self.depthProposess(img)
img = cv2.applyColorMap(np.asarray(img), cv2.COLORMAP_JET)
img = self.rotate(np.asarray(img), rotated)
img = Image.fromarray(img)
if self.phase == 'train' and self.args.strong_aug:
img = self.seq_aug(img)
img = img.resize(resize)
img = img.crop(crop_rect)
if is_flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return np.array(img.resize((sample_size, sample_size)))
def Sample_Image(imgs_path, sl):
frams = []
for a in sl:
ori_image = Image.open(self.get_path(imgs_path, a))
img = transform(ori_image)
frams.append(self.transform(img).view(3, sample_size, sample_size, 1))
if self.args.frp:
skgmaparr = DynamicImage(frams, dynamic_only=False) #[t, c, h, w]
else:
skgmaparr = torch.ones(*img.shape, 1)
return torch.cat(frams, dim=3).type(torch.FloatTensor), skgmaparr
        def DynamicImage(frames, dynamic_only):  # frames: list of [3, H, W, 1] tensors
            # Approximate rank pooling: each output frame is a weighted sum over a
            # sliding window of self._w input frames (a "dynamic image").
            def tensor_arr_rp(arr):
                l = len(arr)
                def tensor_rankpooling(video_arr):
                    def get_w(N):
                        # rank-pooling weights 2*i - N - 1 for i = 1..N
                        return [float(i) * 2 - N - 1 for i in range(1, N + 1)]
                    re = torch.zeros(*video_arr[0].size()[:-1])
                    for a, b in zip(video_arr, get_w(len(video_arr))):
                        re += a.squeeze() * b
                    re = (re - re.min()) / (re.max() - re.min())
                    re = np.uint8(255 * np.float32(re.numpy())).transpose(1, 2, 0)
                    re = self.transform(np.array(re))
                    return re.unsqueeze(-1)
                # windows near the end are shorter; the last output is dropped via arrrp[:-1]
                return [tensor_rankpooling(arr[i:i + self._w]) for i in range(l)]
            arrrp = tensor_arr_rp(frames)
            arrrp = torch.cat(arrrp[:-1], dim=-1).type(torch.FloatTensor)
            return arrrp
return Sample_Image(data_path, sl)
    def get_sl(self, clip):
        """Sample `sn` frame indices from a clip of `clip` frames by splitting
        it into `sn` uniform segments: training draws a random index from each
        segment, evaluation takes each segment's mean index."""
        sn = self.sample_duration if not self.args.frp else self.sample_duration + 1
        def segment(n, i):
            # the i-th of sn segments over [0, n); never empty thanks to max()
            return range(int(n * i / sn), max(int(n * i / sn) + 1, int(n * (i + 1) / sn)))
        if self.phase == 'train':
            sample_clips = [np.random.choice(segment(int(clip) - self.args.sample_window, i))
                            for i in range(sn)]
            start = random.sample(range(0, self.args.sample_window), 1)[0]
            return [l + start for l in sample_clips]
        return [int(np.mean(segment(int(clip), i))) for i in range(sn)]
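    # Illustrative sketch (not part of the original file): the segment-based
    # sampling used by get_sl above, written out standalone. For n frames and
    # sn segments it returns sn roughly evenly spaced indices in [0, n).
    def _demo_uniform_sampling(self, n=100, sn=16):
        return [int(np.mean(range(int(n * i / sn),
                                  max(int(n * i / sn) + 1, int(n * (i + 1) / sn)))))
                for i in range(sn)]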
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
sl = self.get_sl(self.inputs[index][1])
self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
self.clip, skgmaparr = self.image_propose(self.data_path, sl)
return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.inputs[index][0]
def __len__(self):
return len(self.inputs)
if __name__ == '__main__':
import argparse
from config import Config
from lib import *
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='', help='Place config Congfile!')
parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--nprocs', type=int, default=1)
parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
parser.add_argument('--save_output', action='store_true', help='Save logits?')
parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment name')
parser.add_argument('--seed', type=int, default=123, help='random seed')
args = parser.parse_args()
args = Config(args)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
args.dist = False
args.eval_only = True
args.test_batch_size = 1
valid_queue, valid_sampler = build_dataset(args, phase='val')
for step, (inputs, heatmap, target, _) in enumerate(valid_queue):
print(inputs.shape)
input() | 12,341 | 41.412371 | 210 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/Jester.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
import logging
# import accimage
# set_image_backend('accimage')
np.random.seed(123)
class JesterData(Datasets):
def __init__(self, args, ground_truth, modality, phase='train'):
super(JesterData, self).__init__(args, ground_truth, modality, phase)
def LoadKeypoints(self):
if self.phase == 'train':
kpt_file = os.path.join(self.dataset_root, self.args.splits, 'train_kp.data')
else:
kpt_file = os.path.join(self.dataset_root, self.args.splits, 'valid_kp.data')
with open(kpt_file, 'r') as f:
kpt_data = [(lambda arr: (os.path.join(self.dataset_root, self.typ, self.phase, arr[0]), list(map(lambda x: int(float(x)), arr[1:]))))(l[:-1].split()) for l in f.readlines()]
kpt_data = dict(kpt_data)
for k, v in kpt_data.items():
pose = v[:18*2]
r_hand = v[18*2: 18*2+21*2]
l_hand = v[18*2+21*2: 18*2+21*2+21*2]
kpt_data[k] = {'people': [{'pose_keypoints_2d': pose, 'hand_right_keypoints_2d': r_hand, 'hand_left_keypoints_2d': l_hand}]}
logging.info('Load Keypoints files Done, Total: {}'.format(len(kpt_data)))
return kpt_data
def get_path(self, imgs_path, a):
return os.path.join(imgs_path, "%05d.jpg" % int(a + 1))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
sl = self.get_sl(self.inputs[index][1])
self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
# self.clip = self.image_propose(self.data_path, sl)
self.clip, skgmaparr = self.image_propose(self.data_path, sl)
return self.clip.permute(0, 3, 1, 2), skgmaparr, self.inputs[index][2], self.data_path
def __len__(self):
return len(self.inputs)
| 2,115 | 37.472727 | 186 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/IsoGD.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
# import accimage
# set_image_backend('accimage')
class IsoGDData(Datasets):
def __init__(self, args, ground_truth, modality, phase='train'):
super(IsoGDData, self).__init__(args, ground_truth, modality, phase)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
sl = self.get_sl(self.inputs[index][1])
self.data_path = os.path.join(self.dataset_root, self.typ, self.inputs[index][0])
if self.typ == 'depth':
self.data_path = self.data_path.replace('M_', 'K_')
if self.args.Network == 'FusionNet' or self.args.model_ema:
assert self.typ == 'rgb'
self.data_path1 = self.data_path.replace('rgb', 'depth')
self.data_path1 = self.data_path1.replace('M', 'K')
self.clip, skgmaparr = self.image_propose(self.data_path, sl)
self.clip1, skgmaparr1 = self.image_propose(self.data_path1, sl)
# return (self.clip.permute(0, 3, 1, 2), skgmaparr), (self.clip1.permute(0, 3, 1, 2), skgmaparr1), self.inputs[index][2], self.inputs[index][0]
return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), self.inputs[index][2], self.data_path
else:
self.clip, skgmaparr = self.image_propose(self.data_path, sl)
return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.inputs[index][0]
def __len__(self):
return len(self.inputs)
| 1,844 | 40 | 155 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/THU_READ.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
import logging
import cv2
from einops import rearrange, repeat
from torchvision.utils import save_image, make_grid
np.random.seed(123)
class THUREAD(Datasets):
def __init__(self, args, ground_truth, modality, phase='train'):
super(THUREAD, self).__init__(args, ground_truth, modality, phase)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
sl = self.get_sl(self.inputs[index][1])
self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
self.clip, skgmaparr = self.image_propose(self.data_path, sl)
if self.args.Network == 'FusionNet' or self.args.model_ema:
assert self.typ == 'rgb'
self.data_path1 = self.data_path.replace('RGB', 'Depth')
self.data_path1 = '/'.join(self.data_path1.split('/')[:-1]) + '/{}'.format(
self.data_path1.split('/')[-1].replace('Depth', 'D'))
self.clip1, skgmaparr1 = self.image_propose(self.data_path1, sl)
return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), \
self.inputs[index][2], self.video_apth[self.inputs[index][0]]
return self.clip.permute(0, 3, 1, 2), skgmaparr, self.inputs[index][2], self.video_apth[self.inputs[index][0]]
def __len__(self):
return len(self.inputs)
| 1,708 | 33.18 | 118 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/transforms_factory.py | """ Transforms Factory
Factory methods for building image transforms for use with TIMM (PyTorch Image Models)
Hacked together by / Copyright 2019, Ross Wightman
"""
import math
import torch
from torchvision import transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
from .auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform
from timm.data.transforms import str_to_interp_mode, str_to_pil_interp, RandomResizedCropAndInterpolation, ToNumpy
from timm.data.random_erasing import RandomErasing
def transforms_noaug_train(
img_size=224,
interpolation='bilinear',
use_prefetcher=False,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
):
if interpolation == 'random':
# random interpolation not supported with no-aug
interpolation = 'bilinear'
tfl = [
transforms.Resize(img_size, interpolation=str_to_interp_mode(interpolation)),
transforms.CenterCrop(img_size)
]
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
tfl += [ToNumpy()]
else:
tfl += [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
]
return transforms.Compose(tfl)
def transforms_imagenet_train(
img_size=224,
scale=None,
ratio=None,
hflip=0.5,
vflip=0.,
color_jitter=0.4,
auto_augment=None,
interpolation='random',
use_prefetcher=False,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
re_prob=0.,
re_mode='const',
re_count=1,
re_num_splits=0,
separate=False,
):
"""
If separate==True, the transforms are returned as a tuple of 3 separate transforms
for use in a mixing dataset that passes
* all data through the first (primary) transform, called the 'clean' data
* a portion of the data through the secondary transform
* normalizes and converts the branches above with the third, final transform
"""
scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range
ratio = tuple(ratio or (3./4., 4./3.)) # default imagenet ratio range
primary_tfl = [
RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)]
if hflip > 0.:
primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
if vflip > 0.:
primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
secondary_tfl = []
if auto_augment:
assert isinstance(auto_augment, str)
if isinstance(img_size, (tuple, list)):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = dict(
translate_const=int(img_size_min * 0.45),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
if interpolation and interpolation != 'random':
aa_params['interpolation'] = str_to_pil_interp(interpolation)
if auto_augment.startswith('rand'):
secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
elif auto_augment.startswith('augmix'):
aa_params['translate_pct'] = 0.3
secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)]
else:
secondary_tfl += [auto_augment_transform(auto_augment, aa_params)]
elif color_jitter is not None:
# color jitter is enabled when not using AA
if isinstance(color_jitter, (list, tuple)):
# color jitter should be a 3-tuple/list if spec brightness/contrast/saturation
# or 4 if also augmenting hue
assert len(color_jitter) in (3, 4)
else:
# if it's a scalar, duplicate for brightness, contrast, and saturation, no hue
color_jitter = (float(color_jitter),) * 3
secondary_tfl += [transforms.ColorJitter(*color_jitter)]
final_tfl = []
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
final_tfl += [ToNumpy()]
else:
final_tfl += [
transforms.ToTensor(),
# transforms.Normalize(
# mean=torch.tensor(mean),
# std=torch.tensor(std))
]
# if re_prob > 0.:
# final_tfl.append(
# RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu'))
if separate:
return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl)
else:
# return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
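        # NOTE: this assumes the project's modified auto_augment transform
        # returns a pair; only its second element is composed here.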
a, b = secondary_tfl[0]
return transforms.Compose([b]+final_tfl)
class Normaliztion(object):
"""
same as mxnet, normalize into [-1, 1]
image = (image - 127.5)/128
"""
def __call__(self, Image):
new_video_x = (Image - 127.5) / 128
return new_video_x
def transforms_imagenet_eval(
img_size=224,
crop_pct=None,
interpolation='bilinear',
use_prefetcher=False,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
crop_pct = crop_pct or DEFAULT_CROP_PCT
if isinstance(img_size, (tuple, list)):
assert len(img_size) == 2
if img_size[-1] == img_size[-2]:
# fall-back to older behaviour so Resize scales to shortest edge if target is square
scale_size = int(math.floor(img_size[0] / crop_pct))
else:
scale_size = tuple([int(x / crop_pct) for x in img_size])
else:
scale_size = int(math.floor(img_size / crop_pct))
tfl = [
transforms.Resize(scale_size, interpolation=str_to_interp_mode(interpolation)),
transforms.CenterCrop(img_size),
]
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
tfl += [ToNumpy()]
else:
tfl += [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
]
return transforms.Compose(tfl)
def create_transform(
input_size,
is_training=False,
use_prefetcher=False,
no_aug=False,
scale=None,
ratio=None,
hflip=0.5,
vflip=0.,
color_jitter=0.4,
auto_augment=None,
interpolation='bilinear',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
re_prob=0.,
re_mode='const',
re_count=1,
re_num_splits=0,
crop_pct=None,
tf_preprocessing=False,
separate=False):
if isinstance(input_size, (tuple, list)):
img_size = input_size[-2:]
else:
img_size = input_size
if tf_preprocessing and use_prefetcher:
assert not separate, "Separate transforms not supported for TF preprocessing"
from timm.data.tf_preprocessing import TfPreprocessTransform
transform = TfPreprocessTransform(
is_training=is_training, size=img_size, interpolation=interpolation)
else:
if is_training and no_aug:
assert not separate, "Cannot perform split augmentation with no_aug"
transform = transforms_noaug_train(
img_size,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std)
elif is_training:
transform = transforms_imagenet_train(
img_size,
scale=scale,
ratio=ratio,
hflip=hflip,
vflip=vflip,
color_jitter=color_jitter,
auto_augment=auto_augment,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std,
re_prob=re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits,
separate=separate)
else:
assert not separate, "Separate transforms not supported for validation preprocessing"
transform = transforms_imagenet_eval(
img_size,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std,
crop_pct=crop_pct)
return transform
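# Illustrative usage sketch (not part of the original file): building the
# evaluation pipeline through the factory above. Train-time usage would
# additionally thread through the RandAugment string and erase settings.
def _demo_create_transform():
    tfm = create_transform(
        input_size=224,
        is_training=False,
        interpolation='bilinear',
        crop_pct=0.875)
    return tfm  # a torchvision Compose ready to apply to PIL images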
| 8,665 | 34.08502 | 115 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/distributed_sampler.py | '''
This file is modified from:
https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/samplers/distributed_sampler.py
'''
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
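# Illustrative usage sketch (not part of the original file): because the
# permutation is seeded from `self.epoch`, `set_epoch` must be called before
# each epoch or every epoch reuses the same shuffle order.
def _demo_sampler_usage(dataset, epochs=2):
    sampler = DistributedSampler(dataset, num_replicas=1, rank=0, shuffle=True)
    loader = torch.utils.data.DataLoader(dataset, batch_size=4, sampler=sampler)
    for epoch in range(epochs):
        sampler.set_epoch(epoch)  # reseeds the per-epoch permutation
        for _batch in loader:
            pass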
| 1,240 | 33.472222 | 100 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/NvGesture.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
import logging
# import accimage
# set_image_backend('accimage')
np.random.seed(123)
class NvData(Datasets):
def __init__(self, args, ground_truth, modality, phase='train'):
super(NvData, self).__init__(args, ground_truth, modality, phase)
def transform_params(self, resize=(320, 240), crop_size=224, flip=0.5):
if self.phase == 'train':
left, top = random.randint(10, resize[0] - crop_size), random.randint(10, resize[1] - crop_size)
is_flip = True if random.uniform(0, 1) < flip else False
else:
left, top = 32, 32
is_flip = False
return (left, top, left + crop_size, top + crop_size), is_flip
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
sl = self.get_sl(self.inputs[index][1])
self.data_path = os.path.join(self.dataset_root, self.typ, self.inputs[index][0])
self.clip, skgmaparr = self.image_propose(self.data_path, sl)
if self.args.FusionNet or self.args.model_ema:
if self.typ == 'rgb':
self.data_path = self.data_path.replace('rgb', 'depth')
else:
self.data_path = self.data_path.replace('depth', 'rgb')
self.clip1, skgmaparr1 = self.image_propose(self.data_path, sl)
return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), self.inputs[index][2], self.data_path
return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.data_path
def __len__(self):
return len(self.inputs)
| 1,963 | 36.769231 | 146 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/NTU.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
def SubSetSampling_func(inputs, reduce=2):
print('Total training examples', len(inputs))
sample_dict = {}
for p, n, l in inputs:
if l not in sample_dict:
sample_dict[l] = [(p, n)]
else:
sample_dict[l].append((p, n))
sample_dict = dict([(k, v[::reduce]) for k, v in sample_dict.items()])
inputs = [(p, n, l) for l, v in sample_dict.items() for p, n in v]
print('Total training examples after sampling', len(inputs))
return inputs
class NTUData(Datasets):
def __init__(self, args, ground_truth, modality, phase='train'):
super(NTUData, self).__init__(args, ground_truth, modality, phase)
# sub-set sampling
# self.inputs = SubSetSampling_func(self.inputs)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
sl = self.get_sl(self.inputs[index][1])
if self.typ == 'rgb':
self.data_path = os.path.join(self.dataset_root, 'ImagesResize', self.inputs[index][0])
if self.typ == 'depth':
self.data_path = os.path.join(self.dataset_root, 'nturgb+d_depth_masked', self.inputs[index][0][:-4])
self.clip, skgmaparr = self.image_propose(self.data_path, sl)
if self.args.Network == 'FusionNet' or self.args.model_ema:
assert self.typ == 'rgb'
self.data_path = os.path.join(self.dataset_root, 'nturgb+d_depth_masked', self.inputs[index][0][:-4])
self.clip1, skgmaparr1 = self.image_propose(self.data_path, sl)
return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), \
self.inputs[index][2], self.data_path
return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.inputs[index][0]
def get_path(self, imgs_path, a):
if self.typ == 'rgb':
return os.path.join(imgs_path, "%06d.jpg" % int(a + 1))
elif self.typ == 'depth':
return os.path.join(imgs_path, "MDepth-%08d.png" % int(a + 1))
def __len__(self):
return len(self.inputs)
| 2,462 | 35.761194 | 121 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/UCF101.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
def SubSetSampling_func(inputs, reduce=2):
print('Total training examples', len(inputs))
sample_dict = {}
for p, n, l in inputs:
if l not in sample_dict:
sample_dict[l] = [(p, n)]
else:
sample_dict[l].append((p, n))
sample_dict = dict([(k, v[::reduce]) for k, v in sample_dict.items()])
inputs = [(p, n, l) for l, v in sample_dict.items() for p, n in v]
print('Total training examples after sampling', len(inputs))
return inputs
class UCFData(Datasets):
def __init__(self, args, ground_truth, modality, phase='train'):
super(UCFData, self).__init__(args, ground_truth, modality, phase)
# sub-set sampling
# self.inputs = SubSetSampling_func(self.inputs)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
sl = self.get_sl(self.inputs[index][1])
self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
self.clip, skgmaparr = self.image_propose(self.data_path, sl)
if self.args.Network == 'FusionNet' or self.args.model_ema:
assert self.typ == 'rgb'
self.data_path = os.path.join(self.dataset_root, 'nturgb+d_depth_masked', self.inputs[index][0][:-4])
self.clip1, skgmaparr1 = self.image_propose(self.data_path, sl)
return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), \
self.inputs[index][2], self.data_path
return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.inputs[index][0]
def get_path(self, imgs_path, a):
return os.path.join(imgs_path, "%06d.jpg" % int(a))
def __len__(self):
return len(self.inputs)
| 2,117 | 34.898305 | 121 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/build.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .distributed_sampler import DistributedSampler
from .IsoGD import IsoGDData
from .NvGesture import NvData
from .THU_READ import THUREAD
from .Jester import JesterData
from .NTU import NTUData
from .UCF101 import UCFData
from .base import Datasets
import logging
from torch.utils.data.sampler import WeightedRandomSampler
def build_dataset(args, phase):
modality = dict(
M='rgb',
K='depth',
F='Flow'
)
assert args.type in modality, 'Error in modality! The currently supported modalities include: M (RGB), K (Depth) and F (Flow)'
Datasets_func = dict(
basic=Datasets,
NvGesture=NvData,
IsoGD=IsoGDData,
THUREAD=THUREAD,
Jester=JesterData,
NTU=NTUData,
)
assert args.dataset in Datasets_func, 'Error in dataset Function!'
if args.local_rank == 0:
logging.info('Dataset:{}, Modality:{}'.format(args.dataset, modality[args.type]))
splits = args.splits + '/{}.txt'.format(phase)
dataset = Datasets_func[args.dataset](args, splits, modality[args.type], phase=phase)
print(dataset)
if args.dist:
data_sampler = DistributedSampler(dataset)
else:
data_sampler = None
if phase == 'train':
return torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, num_workers=args.num_workers,
shuffle=(data_sampler is None),
sampler=data_sampler, pin_memory=True, drop_last=False), data_sampler
else:
args.test_batch_size = int(1.5 * args.batch_size)
return torch.utils.data.DataLoader(dataset, batch_size=args.test_batch_size, num_workers=args.num_workers,
shuffle=False,
                                           sampler=data_sampler, pin_memory=True, drop_last=False), data_sampler
| 1,992 | 36.603774 | 130 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DSN.py | '''
This file is modified from:
https://github.com/deepmind/kinetics-i3d/blob/master/i3d.py
'''
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import os, math
import sys
from .DTN import DTNNet
from .FRP import FRP_Module
from .utils import *
sys.path.append('../../')
from collections import OrderedDict
from utils import load_pretrained_checkpoint
import logging
class DSNNet(nn.Module):
VALID_ENDPOINTS = (
'Conv3d_1a_7x7',
'MaxPool3d_2a_3x3',
'Conv3d_2b_1x1',
'Conv3d_2c_3x3',
'MaxPool3d_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool3d_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'MaxPool3d_5a_2x2',
'Mixed_5b',
'Mixed_5c'
)
def __init__(self, args, num_classes=400, spatial_squeeze=True, name='inception_i3d', in_channels=3, dropout_keep_prob=0.5,
pretrained: str = False,
dropout_spatial_prob: float=0.0,
frames_drop_rate: float=0.0):
super(DSNNet, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self.logits = None
self.args = args
self.end_points = {}
'''
Low Level Features Extraction
'''
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[1, 7, 7],
stride=(1, 2, 2), padding=(0, 3, 3), name=name + end_point)
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0,
name=name + end_point)
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[1, 3, 3],
padding=(0, 1, 1),
name=name + end_point)
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
'''
Spatial Multi-scale Features Learning
'''
end_point = 'Mixed_3b'
self.end_points[end_point] = SpatialInceptionModule(192, [64, 96, 128, 16, 32, 32], name + end_point)
end_point = 'Mixed_3c'
self.end_points[end_point] = SpatialInceptionModule(256, [128, 128, 192, 32, 96, 64], name + end_point)
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
end_point = 'Mixed_4b'
self.end_points[end_point] = SpatialInceptionModule(128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], name + end_point)
end_point = 'Mixed_4c'
self.end_points[end_point] = SpatialInceptionModule(192 + 208 + 48 + 64, [160, 112, 224, 24, 64, 64], name + end_point)
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 2, 2], stride=(1, 2, 2),
padding=0)
end_point = 'Mixed_5b'
self.end_points[end_point] = SpatialInceptionModule(160 + 224 + 64 + 64, [256, 160, 320, 32, 128, 128],
name + end_point)
end_point = 'Mixed_5c'
self.end_points[end_point] = SpatialInceptionModule(256 + 320 + 128 + 128, [384, 192, 384, 48, 128, 128],
name + end_point)
self.LinearMap = nn.Sequential(
nn.LayerNorm(1024),
nn.Linear(1024, 512),
)
self.avg_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.build()
self.dtn = DTNNet(args, num_classes=self._num_classes)
self.rrange = Rearrange('b c t h w -> b t c h w')
        self.frames_dropout = torch.nn.Dropout2d(p=frames_drop_rate, inplace=False)
if args.frp:
self.frp_module = FRP_Module(w=args.w, inplanes=64)
if pretrained:
load_pretrained_checkpoint(self, pretrained)
def build(self):
for k in self.end_points.keys():
self.add_module(k, self.end_points[k])
def forward(self, x, garr):
inp = x
for end_point in self.VALID_ENDPOINTS:
if end_point in self.end_points:
if end_point in ['Mixed_3b']:
x = self._modules[end_point](x)
if self.args.frp:
x = self.frp_module(x, garr) + x
elif end_point in ['Mixed_4b']:
x = self._modules[end_point](x)
if self.args.frp:
x = self.frp_module(x, garr) + x
f = x
elif end_point in ['Mixed_5b']:
x = self._modules[end_point](x)
if self.args.frp:
x = self.frp_module(x, garr) + x
else:
x = self._modules[end_point](x)
feat = x
x = self.avg_pool(x).view(x.size(0), x.size(1), -1).permute(0, 2, 1)
x = self.LinearMap(x)
        x = self.frames_dropout(x)
        cnn_vision = self.rrange(f.sum(dim=1, keepdim=True))
        logits, distillation_loss, (att_map, cosin_similar, MHAS, visweight) = self.dtn(x)
        # return logits, distillation_loss, (cnn_vision[0], att_map, cosin_similar, visweight, MHAS, (feat, inp[0, :]))
        return logits, distillation_loss, (cnn_vision[0], None, cosin_similar, visweight, (feat.data, inp[0, :]))
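# Illustrative usage sketch (added, not part of the original file): assuming an
# `args` namespace with the fields referenced above (frp, w, sample_duration,
# temp, epochs, N, recoupling, ...):
#
#   model = DSNNet(args, num_classes=60).cuda()
#   logits, distill_loss, _ = model(clip, garr)   # clip: (B, 3, T, H, W)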
| 6,208 | 37.092025 | 127 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/FRP.py | '''
This file is modified from:
https://github.com/zhoubenjia/RAAR3DNet/blob/master/Network_Train/lib/model/RAAR3DNet.py
'''
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
import numpy as np
import cv2
from torchvision.utils import save_image, make_grid
def tensor_split(t):
arr = torch.split(t, 1, dim=2)
arr = [x.squeeze(2) for x in arr]
return arr
def tensor_merge(arr):
arr = [x.unsqueeze(1) for x in arr]
t = torch.cat(arr, dim=1)
return t.permute(0, 2, 1, 3, 4)
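# Shape note (added): tensor_split maps (B, C, T, H, W) to a list of T tensors
# of shape (B, C, H, W); tensor_merge is its inverse, i.e. for any 5-D tensor x
#   tensor_merge(tensor_split(x)).shape == x.shape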
class FRP_Module(nn.Module):
def __init__(self, w, inplanes):
super(FRP_Module, self).__init__()
self._w = w
self.rpconv1d = nn.Conv1d(2, 1, 1, bias=False) # Rank Pooling Conv1d, Kernel Size 2x1x1
self.rpconv1d.weight.data = torch.FloatTensor([[[1.0], [0.0]]])
# self.bnrp = nn.BatchNorm3d(inplanes) # BatchNorm Rank Pooling
# self.relu = nn.ReLU(inplace=True)
self.hapooling = nn.MaxPool2d(kernel_size=2)
def forward(self, x, datt=None):
inp = x
if self._w < 1:
return x
def run_layer_on_arr(arr, l):
return [l(x) for x in arr]
def oneconv(a, b):
s = a.size()
c = torch.cat([a.contiguous().view(s[0], -1, 1), b.contiguous().view(s[0], -1, 1)], dim=2)
c = self.rpconv1d(c.permute(0, 2, 1)).permute(0, 2, 1)
return c.view(s)
if datt is not None:
tarr = tensor_split(x)
garr = tensor_split(datt)
while tarr[0].size()[3] < garr[0].size()[3]: # keep feature map and heatmap the same size
garr = run_layer_on_arr(garr, self.hapooling)
attarr = [a * (b + torch.ones(a.size()).cuda()) for a, b in zip(tarr, garr)]
datt = [oneconv(a, b) for a, b in zip(tarr, attarr)]
return tensor_merge(datt)
def tensor_arr_rp(arr):
l = len(arr)
def tensor_rankpooling(video_arr):
def get_w(N):
return [float(i) * 2 - N - 1 for i in range(1, N + 1)]
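                # Note (added): get_w(N) yields the centered linear weights
                # 2*i - N - 1 for i = 1..N (e.g. N=4 -> [-3, -1, 1, 3]), an
                # approximate rank-pooling weighting of frames in the window.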
# re = torch.zeros(video_arr[0].size(0), 1, video_arr[0].size(2), video_arr[0].size(3)).cuda()
re = torch.zeros(video_arr[0].size()).cuda()
for a, b in zip(video_arr, get_w(len(video_arr))):
# a = transforms.Grayscale(1)(a)
re += a * b
re = F.gelu(re)
re -= torch.min(re)
re = re / torch.max(re) if torch.max(re) != 0 else re / (torch.max(re) + 0.00001)
return transforms.Grayscale(1)(re)
return [tensor_rankpooling(arr[i:i + self._w]) for i in range(l)]
arrrp = tensor_arr_rp(tensor_split(x))
b, c, t, h, w = tensor_merge(arrrp).shape
mask = torch.zeros(b, c, self._w-1, h, w, device=tensor_merge(arrrp).device)
garrs = torch.cat((mask, tensor_merge(arrrp)), dim=2)
return garrs
if __name__ == '__main__':
    # Smoke test. The original referenced an undefined SATT_Module; FRP_Module
    # is the class defined above. w=2 and inplanes=3 are assumed values for a
    # raw RGB clip of shape (B, C, T, H, W).
    model = FRP_Module(w=2, inplanes=3).cuda()
    inp = torch.randn(2, 3, 64, 224, 224).cuda()
    out = model(inp)
    print(out.shape)
| 3,236 | 36.206897 | 110 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/fusion_Net.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
import os
import sys
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
sys.path.extend(['../../', '../'])
from utils import load_pretrained_checkpoint, load_checkpoint, SoftTargetCrossEntropy, concat_all_gather, uniform_sampling
import logging
# from .DSN_Fusion import DSNNet
from .DSN_v2 import DSNNetV2
from .DTN_v2 import DTNNet as DTNNetV2
from .DTN_v2 import Transformer, clsToken
from .trans_module import *
class Encoder(nn.Module):
def __init__(self, C_in, C_out, dilation=2):
super(Encoder, self).__init__()
self.enconv = nn.Sequential(
nn.Conv2d(C_in, C_in, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(C_in),
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in // 2, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(C_in // 2),
nn.ReLU(inplace=False),
nn.Conv2d(C_in // 2, C_in // 4, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(C_in // 4),
nn.ReLU(inplace=False),
nn.Conv2d(C_in // 4, C_out, kernel_size=1, stride=1, padding=0, bias=False),
)
def forward(self, x1, x2):
b, c = x1.shape
x = torch.cat((x1, x2), dim=1).view(b, -1, 1, 1)
x = self.enconv(x)
return x
class Decoder(nn.Module):
def __init__(self, C_in, C_out, dilation=2):
super(Decoder, self).__init__()
self.deconv = nn.Sequential(
nn.Conv2d(C_in, C_out // 4, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out // 4),
nn.ReLU(),
nn.Conv2d(C_out // 4, C_out // 2, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out // 2),
nn.ReLU(),
)
def forward(self, x):
x = self.deconv(x)
return x
class FusionModule(nn.Module):
def __init__(self, channel_in=1024, channel_out=256, num_classes=60):
super(FusionModule, self).__init__()
self.encoder = Encoder(channel_in, channel_out)
self.decoder = Decoder(channel_out, channel_in)
self.efc = nn.Conv2d(channel_out, num_classes, kernel_size=1, padding=0, bias=False)
def forward(self, r, d):
en_x = self.encoder(r, d) # [4, 256, 1, 1]
de_x = self.decoder(en_x)
en_x = self.efc(en_x)
return en_x.squeeze(), de_x
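# Shape note (added): with channel_in=1024 the encoder takes two 512-d vectors,
# concatenated to (B, 1024, 1, 1); en_x.squeeze() is the (B, num_classes) fused
# logit and de_x the (B, channel_in // 2, 1, 1) reconstruction feature.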
class DTN(DTNNetV2):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, x):
B, N, C = x.shape
# Local-Global features capturing
outputs, tem_feat = [], []
temp = self.temp_schedule[self._args.epoch]
for cls_token, (TCNN, MaxPool, TransBlock, mlp) in zip(self.cls_tokens, self.multi_scale_transformers):
            sl = uniform_sampling(x.size(1), cls_token.frame_rate, random=self.training)
sub_x = x[:, sl, :]
sub_x = sub_x.permute(0, 2, 1).view(B, C, -1, 1, 1)
sub_x = MaxPool(TCNN(sub_x))
sub_x = sub_x.permute(0, 2, 1, 3, 4).view(B, -1, C)
sub_x = cls_token(sub_x)
sub_x = TransBlock(sub_x)
sub_x = sub_x[:, 0, :]
tem_feat.append(sub_x.unsqueeze(-1))
out = mlp(sub_x)
outputs.append(out / temp)
# Multi-branch fusion
if self.branch_merge == 'sum':
x = torch.zeros_like(out)
for out in outputs:
x += out
elif self.branch_merge == 'pool':
x = torch.cat([out.unsqueeze(-1) for out in outputs], dim=-1)
x = self.max_pool(x).squeeze()
return x, outputs, torch.cat(tem_feat, dim=-1).mean(-1)
class DSN(DSNNetV2):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dtn = DTN(self.args, num_classes=self._num_classes)
def forward(self, x, endpoint=None):
# if endpoint=='spatial':
x = self.stem(x)
temp_out = []
for i, sms_layer in enumerate(self.SMS_layers):
x = sms_layer(x)
if isinstance(x, tuple):
x, temp_w = x
temp_out.append(temp_w)
self.feat = x
x = self.avg_pool(x).view(x.size(0), x.size(1), -1).permute(0, 2, 1)
x = self.LinearMap(x)
        x = self.frames_dropout(x)
spati_feat = x
self.visweight = torch.sigmoid(x[0])
target_out = []
for j in range(len(self.dtn.multi_scale_transformers)):
target_out.append(self.dtn.multi_scale_transformers[j][2].get_classEmbd())
# return x, (temp_out, target_out)
x, (xs, xm, xl), tem_feat = self.dtn(x)
return (x, xs, xm, xl), spati_feat, tem_feat, (temp_out, target_out)
class AttentionNet(nn.Module):
def __init__(self, dim=512, heads=8, dim_head=64, mlp_dim=768, dropout=0.1, knn_attention=True, topk=0.7):
super(AttentionNet, self).__init__()
self.knn_attention = knn_attention
self.topk = topk
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.q = nn.Linear(dim, inner_dim, bias=False)
self.k = nn.Linear(dim, inner_dim, bias=False)
self.v = nn.Linear(dim, inner_dim, bias=False)
self.norm = nn.LayerNorm(dim)
self.ffn = PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
# self.map = nn.Linear(inner_dim, dim, bias=True)
def forward(self, x_r, x_d):
b, n, c, h = *x_r.shape, self.heads
q, k, v = self.q(x_r), self.k(x_d), self.v(x_r)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), [q, k, v])
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
if self.knn_attention:
mask = torch.zeros(b, self.heads, n, n, device=x_r.device, requires_grad=False)
index = torch.topk(dots, k=int(dots.size(-1)*self.topk), dim=-1, largest=True)[1]
mask.scatter_(-1, index, 1.)
dots = torch.where(mask > 0, dots, torch.full_like(dots, float('-inf')))
attn = dots.softmax(dim=-1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.norm(out) + x_r
out = self.ffn(out) + out
return out
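# Note (added): cross-modal attention where queries and values come from x_r
# and keys from x_d, with optional kNN masking that keeps only the top
# topk-fraction of key scores per query before the softmax.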
class EnhanceModule(nn.Module):
def __init__(self, dim=512):
super(EnhanceModule, self).__init__()
self.mlp_rgb = nn.Sequential(
nn.Linear(dim*2, dim),
nn.ReLU(),
nn.Linear(dim, dim),
nn.Sigmoid()
)
self.mlp_depth = nn.Sequential(
nn.Linear(dim*2, dim),
nn.ReLU(),
nn.Linear(dim, dim),
nn.Sigmoid()
)
self.norm = nn.LayerNorm(dim*2)
def forward(self, xr, xd):
joint_feature = self.norm(torch.cat((xr, xd), dim=-1))
score_grb = self.mlp_rgb(joint_feature)
score_depth = self.mlp_depth(joint_feature)
xr = xr * score_grb
xd = xd * score_depth
return xr, xd
class ComplementSpatial(nn.Module):
def __init__(self, depths=2, dim=512):
super(ComplementSpatial, self).__init__()
self.att_nets = nn.ModuleList([])
for _ in range(depths):
self.att_nets.append(nn.ModuleList([
EnhanceModule(dim),
AttentionNet(dim),
AttentionNet(dim)
]))
self.norm = nn.LayerNorm(dim*2)
def forward(self, xr, xd):
b, n, c = xr.shape
xr, xd = torch.split(self.norm(torch.cat((xr, xd), dim=-1)), [c, c], dim=-1)
for EM, ANM, ANK in self.att_nets:
xr, xd = EM(xr, xd)
# xr, xd = ANM(xr, xd), ANK(xd, xr)
cm = ANM(xr, xd)
ck = ANK(xd, xr)
xr, xd = cm, ck
return xr, xd
class ComplementTemporal(nn.Module):
def __init__(self, depths=2, dim=512):
super(ComplementTemporal, self).__init__()
self.att_nets = nn.ModuleList([])
for _ in range(depths):
self.att_nets.append(nn.ModuleList([
# EnhanceModule(dim),
AttentionNet(dim),
AttentionNet(dim)
]))
self.norm = nn.LayerNorm(dim*2)
def forward(self, xr, xd):
b, n, c = xr.shape
xr, xd = torch.split(self.norm(torch.cat((xr, xd), dim=-1)), [c, c], dim=-1)
for ANM, ANK in self.att_nets:
# xr, xd = ANM(xr, xd), ANK(xd, xr)
# xr, xd = EM(xr, xd)
cm = ANM(xr, xd)
ck = ANK(xd, xr)
xr, xd = cm, ck
return xr, xd
class SFNNet(nn.Module):
def __init__(self, args, num_classes, pretrained, spatial_interact=False, temporal_interact=False):
super(SFNNet, self).__init__()
self.linear = nn.Linear(2, num_classes)
def forward(self, logitr, logitd):
b, c = logitr.shape
softmaxr = torch.softmax(logitr, dim=-1)
softmaxd = torch.softmax(logitd, dim=-1)
cat_softmax = torch.cat((softmaxr.unsqueeze(-1), softmaxd.unsqueeze(-1)), dim=-1)
output = self.linear(cat_softmax)
output *= torch.eye(c, c, device=logitr.device, requires_grad=False)
return output.sum(-1)
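# Note (added): SFNNet fuses the two modality scores with a learned 2-way
# linear combination applied per class; multiplying by the identity matrix and
# summing over the last axis keeps only each class's own combined score.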
class CrossFusionNet(nn.Module):
def __init__(self, args, num_classes, pretrained, spatial_interact=False, temporal_interact=False):
super(CrossFusionNet, self).__init__()
self._MES = torch.nn.MSELoss()
self._BCE = torch.nn.BCELoss()
self._CE = SoftTargetCrossEntropy()
self.spatial_interact = spatial_interact
self.temporal_interact = temporal_interact
self.args = args
self.frame_rate = args.sample_duration #//2 if args.sample_duration > 32 else args.sample_duration
self.visweight = None
self.feat = None
self.pca_data = None
self.target_data = None
self.SCC_Module = ComplementSpatial(depths=args.scc_depth)
self.temp_enhance_module = EnhanceModule(dim=512)
self.TimesFormer = ComplementTemporal(depths=args.tcc_depth)
# self.timesform1 = Transformer(dim=512, depth=2, heads=8, dim_head=64, mlp_dim=768,
# dropout=0.1)
# self.cls_token1 = clsToken(self.frame_rate+1, 512)
self.pos_embedding_M = nn.Parameter(torch.randn(1, self.frame_rate + 1, 512))
# self.timesform2 = Transformer(dim=512, depth=2, heads=8, dim_head=64, mlp_dim=768,
# dropout=0.1)
# self.cls_token2 = clsToken(self.frame_rate+1, 512)
self.pos_embedding_K = nn.Parameter(torch.randn(1, self.frame_rate + 1, 512))
self.classifier1 = nn.Linear(512, num_classes)
self.classifier2 = nn.Linear(512, num_classes)
self.max_pool = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=0)
self.norm1 = nn.LayerNorm(512)
self.norm2 = nn.LayerNorm(512)
if pretrained:
load_pretrained_checkpoint(self, pretrained)
logging.info("Load Pre-trained model state_dict Done !")
def forward(self, hidden_feature):
spatial_M, spatial_K, temporal_M, temporal_K = hidden_feature
comple_features_M, comple_features_K = self.SCC_Module(spatial_M, spatial_K)
b, n, c = comple_features_M.shape
# if self.frame_rate > 32:
# comple_features_M = self.max_pool(comple_features_M.view(b, c, n, 1, 1)).view(b, n//2, c)
# comple_features_K = self.max_pool(comple_features_K.view(b, c, n, 1, 1)).view(b, n//2, c)
temporal_enhance_M, temporal_enhance_K = self.temp_enhance_module(temporal_M, temporal_K)
# temporal_enhance_M, temporal_enhance_K = temporal_M, temporal_K
temporal_feature_M = self.norm1(torch.cat((temporal_enhance_M.unsqueeze(1), comple_features_M), dim=1))
temporal_feature_M += self.pos_embedding_M
# temporal_feature_M = self.cls_token1(temporal_feature_M)
# temporal_feature_M = self.timesform1(temporal_feature_M)
temporal_feature_K = self.norm2(torch.cat((temporal_enhance_K.unsqueeze(1), comple_features_K), dim=1))
temporal_feature_K += self.pos_embedding_K
# temporal_feature_K = self.cls_token2(temporal_feature_K)
# temporal_feature_K = self.timesform2(temporal_feature_K)
temporal_feature_M, temporal_feature_K = self.TimesFormer(temporal_feature_M, temporal_feature_K)
out_M = self.classifier1(temporal_feature_M[:, 0])
out_K = self.classifier2(temporal_feature_K[:, 0])
normal_func = lambda x: concat_all_gather(F.normalize(x, p = 2, dim=-1))
b, _ = normal_func(temporal_M).shape
        self.pca_data = torch.cat((normal_func(temporal_M), normal_func(temporal_K), normal_func(temporal_feature_M[:, 0]), normal_func(temporal_feature_K[:, 0])))
        self.target_data = torch.cat((torch.ones(b), torch.ones(b)+1, torch.ones(b)+2, torch.ones(b)+3))
        return (out_M, out_K), (None, torch.cat((temporal_feature_M[:, 0].unsqueeze(-1), temporal_feature_K[:, 0].unsqueeze(-1)), dim=-1))
def get_cluster_visualization(self):
return self.pca_data, self.target_data
def get_visualization(self):
return self.feat, self.visweight
class FeatureCapter(nn.Module):
def __init__(self, args, num_classes=249, pretrained=None):
super(FeatureCapter, self).__init__()
self.args = args
assert args.rgb_checkpoint and args.depth_checkpoint
self.Modalit_rgb = DSN(args, num_classes=num_classes)
self.Modalit_depth = DSN(args, num_classes=num_classes)
rgb_checkpoint = args.rgb_checkpoint[args.FusionNet]
self.strat_epoch_r, best_acc = load_checkpoint(self.Modalit_rgb, rgb_checkpoint)
print(f'Best acc RGB: {best_acc}')
depth_checkpoint = args.depth_checkpoint[args.FusionNet]
self.strat_epoch_d, best_acc = load_checkpoint(self.Modalit_depth, depth_checkpoint)
print(f'Best acc depth: {best_acc}')
def forward(self, rgb, depth):
self.args.epoch = self.strat_epoch_r - 1
(logit_M, M_xs, M_xm, M_xl), spatial_M, temporal_M, temp_out_M = self.Modalit_rgb(rgb, endpoint='spatial')
self.args.epoch = self.strat_epoch_d - 1
(logit_K, K_xs, K_xm, K_xl), spatial_K, temporal_K, temp_out_K = self.Modalit_depth(depth, endpoint='spatial')
return (logit_M, M_xs, M_xm, M_xl), (logit_K, K_xs, K_xm, K_xl), (spatial_M, spatial_K, temporal_M, temporal_K) | 14,925 | 38.802667 | 162 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DTN_v2.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from torch.autograd import Variable
from torch import nn, einsum
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, helpers, DropPath
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import numpy as np
import random, math
from .utils import *
from .trans_module import *
from utils import uniform_sampling
import matplotlib.pyplot as plt # For graphics
import seaborn as sns
import cv2
np.random.seed(123)
random.seed(123)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0., apply_transform=False, knn_attention=0.7):
super().__init__()
self.layers = nn.ModuleList([])
self.cls_embed = [None for _ in range(depth)]
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout,
apply_transform=apply_transform, knn_attention=knn_attention)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
]))
def forward(self, x):
for ii, (attn, ff) in enumerate(self.layers):
x = attn(x) + x
x = ff(x) + x
self.cls_embed[ii] = x[:, 0]
return x
def get_classEmbd(self):
return self.cls_embed
class clsToken(nn.Module):
def __init__(self, frame_rate, inp_dim):
super().__init__()
self.frame_rate = frame_rate
num_patches = frame_rate
self.cls_token = nn.Parameter(torch.randn(1, 1, inp_dim))
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, inp_dim))
def forward(self, x):
B, N, C = x.shape
cls_token = repeat(self.cls_token, '() n d -> b n d', b=B)
x = torch.cat((cls_token, x), dim=1)
x += self.pos_embedding[:, :(N + 1)]
return x
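# Note (added): clsToken prepends a learnable class token and adds a learned
# positional embedding, mapping (B, N, C) to (B, N + 1, C) with N ==
# frame_rate; branch classification later reads position 0.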
class DTNNet(nn.Module):
def __init__(self, args, num_classes=249, inp_dim=512, dim_head=64, hidden_dim=768,
heads=8, pool='cls', dropout=0.1, emb_dropout=0.1, mlp_dropout=0.0, branch_merge='pool',
init: bool = False,
warmup_temp_epochs: int = 30,
branchs=3,
dynamic_tms=True):
super().__init__()
self._args = args
        print('Temporal Resolution:')
frame_rate = args.sample_duration // args.intar_fatcer
# names = self.__dict__
self.cls_tokens = nn.ModuleList([])
dynamic_kernel = []
for i in range(branchs):
# names['cls_token_' + str(i)] = nn.Parameter(torch.randn(1, 1, frame_rate))
self.cls_tokens.append(clsToken(frame_rate, inp_dim))
print(frame_rate)
dynamic_kernel.append(int(frame_rate**0.5))
frame_rate += args.sample_duration // args.intar_fatcer
'''
constract multi-branch structures
'''
trans_depth = args.N
self.multi_scale_transformers = nn.ModuleList([])
for ii in range(branchs):
self.multi_scale_transformers.append(
nn.ModuleList([
TemporalInceptionModule(inp_dim, [160,112,224,24,64,64], kernel_size=dynamic_kernel[ii] if dynamic_tms else 3),
MaxPool3dSamePadding(kernel_size=[3, 1, 1], stride=(1, 1, 1), padding=0),
Transformer(inp_dim, trans_depth, heads, dim_head, mlp_dim=hidden_dim, dropout=emb_dropout, knn_attention=args.knn_attention),
nn.Sequential(
nn.LayerNorm(inp_dim),
nn.Dropout(mlp_dropout),
nn.Linear(inp_dim, num_classes))
]))
# num_patches = args.sample_duration
# self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, inp_dim))
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.branch_merge = branch_merge
warmup_temp, temp = map(float, args.temp)
self.temp_schedule = np.concatenate((
np.linspace(warmup_temp,
temp, warmup_temp_epochs),
np.ones(args.epochs - warmup_temp_epochs) * temp
))
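        # Note (added): the temperature ramps linearly from warmup_temp to temp
        # over the first warmup_temp_epochs epochs, then stays constant; branch
        # logits are divided by it (smaller temp -> sharper distributions).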
# self.show_res = Rearrange('b t (c p1 p2) -> b t c p1 p2', p1=int(small_dim ** 0.5), p2=int(small_dim ** 0.5))
if init:
self.init_weights()
def TC_forward(self):
return self.tc_feat
# @torch.no_grad()
def init_weights(self):
def _init(m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(
m.weight) # _trunc_normal(m.weight, std=0.02) # from .initialization import _trunc_normal
if hasattr(m, 'bias') and m.bias is not None:
nn.init.normal_(m.bias, std=1e-6) # nn.init.constant(m.bias, 0)
self.apply(_init)
def forward(self, x): # x size: [2, 64, 512]
B, N, C = x.shape
# Add position embedding
# x += self.pos_embedding
# Local-Global features capturing
outputs = []
temp = self.temp_schedule[self._args.epoch]
for cls_token, (TCNN, MaxPool, TransBlock, mlp) in zip(self.cls_tokens, self.multi_scale_transformers):
# cls_token = self.__dict__['cls_token_{}'.format(i)]
sl = uniform_sampling(x.size(1), cls_token.frame_rate, random=self.training)
sub_x = x[:, sl, :]
sub_x = sub_x.permute(0, 2, 1).view(B, C, -1, 1, 1)
sub_x = MaxPool(TCNN(sub_x))
sub_x = sub_x.permute(0, 2, 1, 3, 4).view(B, -1, C)
sub_x = cls_token(sub_x)
sub_x = TransBlock(sub_x)
sub_x = sub_x[:, 0, :]
out = mlp(sub_x)
outputs.append(out / temp)
# Multi-branch fusion
if self.branch_merge == 'sum':
x = torch.zeros_like(out)
for out in outputs:
x += out
elif self.branch_merge == 'pool':
x = torch.cat([out.unsqueeze(-1) for out in outputs], dim=-1)
x = self.max_pool(x).squeeze()
        return x, outputs
| 6,286 | 35.982353 | 146 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/utils.py | '''
This file is modified from:
https://github.com/deepmind/kinetics-i3d/blob/master/i3d.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import os
import sys
class MaxPool3dSamePadding(nn.MaxPool3d):
def compute_pad(self, dim, s):
if s % self.stride[dim] == 0:
return max(self.kernel_size[dim] - self.stride[dim], 0)
else:
return max(self.kernel_size[dim] - (s % self.stride[dim]), 0)
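    # Note (added): TensorFlow-style 'SAME' padding, e.g. kernel 3, stride 2,
    # s = 7 gives pad = max(3 - (7 % 2), 0) = 2, split between both sides.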
def forward(self, x, is_pad=True):
(batch, channel, t, h, w) = x.size()
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
pad_t_f = pad_t // 2
pad_t_b = pad_t - pad_t_f
pad_h_f = pad_h // 2
pad_h_b = pad_h - pad_h_f
pad_w_f = pad_w // 2
pad_w_b = pad_w - pad_w_f
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
if is_pad:
x = F.pad(x, pad)
return super(MaxPool3dSamePadding, self).forward(x)
class Unit3D(nn.Module):
def __init__(self, in_channels,
output_channels,
kernel_shape=(1, 1, 1),
stride=(1, 1, 1),
padding=0,
activation_fn=F.relu,
use_batch_norm=True,
use_bias=False,
name='unit_3d'):
"""Initializes Unit3D module."""
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
self.conv3d = nn.Conv3d(in_channels=in_channels,
out_channels=self._output_channels,
kernel_size=self._kernel_shape,
stride=self._stride,
padding=0,
bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01)
def compute_pad(self, dim, s):
if s % self._stride[dim] == 0:
return max(self._kernel_shape[dim] - self._stride[dim], 0)
else:
return max(self._kernel_shape[dim] - (s % self._stride[dim]), 0)
def forward(self, x):
(batch, channel, t, h, w) = x.size()
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
pad_t_f = pad_t // 2
pad_t_b = pad_t - pad_t_f
pad_h_f = pad_h // 2
pad_h_b = pad_h - pad_h_f
pad_w_f = pad_w // 2
pad_w_b = pad_w - pad_w_f
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
x = F.pad(x, pad)
x = self.conv3d(x)
if self._use_batch_norm:
x = self.bn(x)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
class TemporalInceptionModule(nn.Module):
def __init__(self, in_channels, out_channels, name='temporal', kernel_size=3):
super(TemporalInceptionModule, self).__init__()
self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=[1, 1, 1], padding=0,
name=name+'/Branch_0/Conv3d_0a_1x1')
self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=[1, 1, 1], padding=0,
name=name+'/Branch_1/Conv3d_0a_1x1')
self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=[kernel_size, 1, 1],
name=name+'/Branch_1/Conv3d_0b_3x3')
self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=[1, 1, 1], padding=0,
name=name+'/Branch_2/Conv3d_0a_1x1')
self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=[kernel_size, 1, 1],
name=name+'/Branch_2/Conv3d_0b_3x3')
self.b3a = MaxPool3dSamePadding(kernel_size=[kernel_size, 1, 1],
stride=(1, 1, 1), padding=0)
self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=[1, 1, 1], padding=0,
name=name+'/Branch_3/Conv3d_0b_1x1')
self.name = name
def forward(self, x):
b0 = self.b0(x)
b1 = self.b1b(self.b1a(x))
b2 = self.b2b(self.b2a(x))
b3 = self.b3b(self.b3a(x))
return torch.cat([b0,b1,b2,b3], dim=1)
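# Note (added): the temporal module convolves only along T (kernels k x 1 x 1),
# its spatial counterpart below only along H x W (1 x 3 x 3); both output
# out_channels[0] + out_channels[2] + out_channels[4] + out_channels[5] maps.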
class SpatialInceptionModule(nn.Module):
def __init__(self, in_channels, out_channels, name):
super(SpatialInceptionModule, self).__init__()
self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_0/Conv3d_0a_1x1')
self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_1/Conv3d_0a_1x1')
self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=[1, 3, 3],
name=name + '/Branch_1/Conv3d_0b_3x3')
self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_2/Conv3d_0a_1x1')
self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=[1, 3, 3],
name=name + '/Branch_2/Conv3d_0b_3x3')
self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3],
stride=(1, 1, 1), padding=0)
self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_3/Conv3d_0b_1x1')
self.name = name
def forward(self, x):
b0 = self.b0(x)
b1 = self.b1b(self.b1a(x))
b2 = self.b2b(self.b2a(x))
b3 = self.b3b(self.b3a(x))
        return torch.cat([b0, b1, b2, b3], dim=1)
| 6,415 | 40.662338 | 121 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DSN_v2.py | '''
This file is modified from:
https://github.com/deepmind/kinetics-i3d/blob/master/i3d.py
'''
import torch
from torch import nn, einsum
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from torch.autograd import Variable
from einops import rearrange, repeat
import numpy as np
import cv2
import os, math
import sys
from .DTN import DTNNet
from .DTN_v2 import DTNNet as DTNNetV2
from .FRP import FRP_Module
from .utils import *
sys.path.append('../../')
from collections import OrderedDict
from utils import load_pretrained_checkpoint
import logging
class RCMModule(nn.Module):
def __init__(self, args, dim_head=16):
super(RCMModule, self).__init__()
args.recoupling = False
self.args = args
self._distill = True
self.heads = args.sample_duration
self.inp_dim = args.sample_duration
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.avg_pool3d = nn.AdaptiveAvgPool3d(1)
# Self Attention Layers
self.q = nn.Linear(self.inp_dim, dim_head * self.heads, bias=False)
self.k = nn.Linear(self.inp_dim, dim_head * self.heads, bias=False)
self.scale = dim_head ** -0.5
# Distill MLP
if self._distill:
self.TM_project = nn.Sequential(
nn.Linear(self.inp_dim, self.inp_dim*2, bias=False),
nn.GELU(),
nn.Linear(self.inp_dim*2, self.inp_dim, bias=False),
nn.LayerNorm(self.inp_dim),
)
temp_out = args.sample_duration//2 if args.sample_duration == 64 else args.sample_duration
self.linear = nn.Linear(self.inp_dim, 512)
def forward(self, x):
b, c, t, h, w = x.shape
residual = x
x = rearrange(x, 'b c t h w -> (b t) c h w')
x = self.avg_pool(x)
x = rearrange(x, '(b t) c h w -> b c (t h w)', t=t)
# x = self.norm(x)
q, k = self.q(x), self.k(x)
v = rearrange(residual, 'b c t h w -> b t c (h w)')
q, k = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), [q, k])
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = dots.softmax(dim=-1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# attn = attn.mean(-2, keepdim=True).transpose(2,3)
# out = v * attn.expand_as(v)
out = rearrange(out, 'b t c (h w) -> b c t h w', h=h, w=w)
# out += residual
if self._distill:
temporal_embedding = self.avg_pool3d(out.permute(0, 2, 1, 3, 4)).squeeze()
temporal_project = self.TM_project(temporal_embedding)
temporal_weight = torch.sigmoid(temporal_project)[:, None, :, None, None]
out = out * temporal_weight.expand_as(out)
temporal_weight = temporal_weight.squeeze()
out += residual
return out, self.linear(temporal_project)
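# Note (added): RCM recalibrates features over time via self-attention across
# the T axis of globally pooled maps, then (when _distill is set) rescales the
# result with a sigmoid temporal weighting and returns a 512-d projection of
# that weighting for the distillation branch.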
class SMSBlock(nn.Module):
def __init__(self, channel_list, kernel_size=None, stride=None, padding=0, name='i3d'):
super(SMSBlock, self).__init__()
in_channels, hidden_channels, out_channels = channel_list
self.end_points = {}
end_point = 'Mixed1'
self.end_points[end_point] = SpatialInceptionModule(in_channels, hidden_channels, name + end_point)
end_point = 'Mixed2'
self.end_points[end_point] = SpatialInceptionModule(sum([hidden_channels[0], hidden_channels[2],
hidden_channels[4], hidden_channels[5]]), out_channels, name + end_point)
if kernel_size is not None:
end_point = 'MaxPool3d_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=kernel_size, stride=stride,
padding=padding)
self.build()
def build(self):
for k in self.end_points.keys():
self.add_module(k, self.end_points[k])
def forward(self, x):
for end_point in self.end_points.keys():
x = self._modules[end_point](x)
return x
class Channel_Pooling(nn.Module):
def __init__(self):
super(Channel_Pooling, self).__init__()
self.max_pool = nn.MaxPool3d(kernel_size=(2,1,1), stride=(2, 1, 1), padding=0)
def forward(self, x):
# x size: torch.Size([16, 512, 16, 7, 7])
x = x.transpose(1, 2)
x = self.max_pool(x)
return x.transpose(1, 2)
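# Note (added): Channel_Pooling halves the channel dimension by max-pooling
# over channels (the transpose makes C the pooled axis), e.g. 1024 -> 512.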
class DSNNetV2(nn.Module):
def __init__(self, args, num_classes=400, spatial_squeeze=True, name='inception_i3d', in_channels=3,
pretrained: bool = False,
sms_depth: int = 3,
dropout_spatial_prob: float=0.0,
frames_drop_rate: float=0.0):
super(DSNNetV2, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self.args = args
self.stem = nn.Sequential(
Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[1, 7, 7],
stride=(1, 2, 2), padding=(0, 3, 3), name=name + 'Conv3d_1a_7x7'),
MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0),
Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0,
name=name + 'Conv3d_2b_1x1'),
Unit3D(in_channels=64, output_channels=192, kernel_shape=[1, 3, 3],
padding=(0, 1, 1), name=name + 'Conv3d_2c_3x3'),
MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
)
'''
Spatial Multi-scale Features Learning
'''
sms_block = [
# input_dim, hidden_dim, output_dim
[192, [64, 96, 128, 16, 32, 32], [128, 128, 192, 32, 96, 64]],
[128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], [160, 112, 224, 24, 64, 64]],
[160 + 224 + 64 + 64, [256, 160, 320, 32, 128, 128], [384, 192, 384, 48, 128, 128]],
]
assert len(sms_block) == sms_depth
self.SMS_layers = nn.ModuleList([])
for i in range(sms_depth):
if i == 0:
self.SMS_layers.append(
SMSBlock(sms_block[i], kernel_size=[1,3,3], stride=(1,2,2), padding=0)
)
elif i==1:
self.SMS_layers.append(
SMSBlock(sms_block[i], kernel_size=[1,2,2], stride=(1,2,2), padding=0)
)
elif i==2:
self.SMS_layers.append(SMSBlock(sms_block[i], kernel_size=[1,1,1], stride=(1,1,1), padding=0))
        self.SMS_layers.append(Channel_Pooling())
self.SMS_layers.append(RCMModule(args))
self.LinearMap = nn.Sequential(
nn.Dropout(dropout_spatial_prob),
nn.LayerNorm(512),
nn.Linear(512, 512),
)
self.avg_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
self.dtn = DTNNetV2(args, num_classes=self._num_classes)
self.rrange = Rearrange('b c t h w -> b t c h w')
        self.frames_dropout = torch.nn.Dropout2d(p=frames_drop_rate, inplace=False)
# Feature visualization
self.feat = None
self.visweight = None
def get_visualization(self):
return self.feat, self.visweight
    def build(self):
        # Unused helper kept for parity with DSNNet; nn.ModuleList registers
        # its children itself, and it exposes no .keys(), so iterate directly.
        for i, layer in enumerate(self.SMS_layers):
            self.add_module('SMS_layer_{}'.format(i), layer)
def forward(self, x, garr=None):
inp = x
x = self.stem(x)
temp_out = []
for i, sms_layer in enumerate(self.SMS_layers):
x = sms_layer(x)
if isinstance(x, tuple):
x, temp_w = x
temp_out.append(temp_w)
if i == 1:
f = x
self.feat = x.data
x = self.avg_pool(x).view(x.size(0), x.size(1), -1).permute(0, 2, 1)
x = self.LinearMap(x)
        x = self.frames_dropout(x)
        cnn_vision = self.rrange(f.sum(dim=1, keepdim=True))
self.visweight = torch.sigmoid(x[0])
# logits, _, (att_map, cosin_similar, MHAS, visweight) = self.dtn(x)
x, (xs, xm, xl) = self.dtn(x)
target_out = []
for j in range(len(self.dtn.multi_scale_transformers)):
target_out.append(self.dtn.multi_scale_transformers[j][2].get_classEmbd())
return (x, xs, xm, xl), (temp_out, target_out)
| 8,548 | 36.827434 | 137 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/trans_module.py | '''
This file is modified from:
https://github.com/rishikksh20/CrossViT-pytorch/blob/master/crossvit.py
'''
import torch
from torch import nn, einsum
import torch.nn.functional as F
import math
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
# class FeedForward(nn.Module):
# def __init__(self, dim, hidden_dim, dropout=0.):
# super().__init__()
# self.net = nn.Sequential(
# nn.Linear(dim, hidden_dim),
# nn.GELU(),
# nn.Dropout(dropout),
# nn.Linear(hidden_dim, dim),
# nn.Dropout(dropout)
# )
# def forward(self, x):
# return self.net(x)
class FeedForward(nn.Module):
"""FeedForward Neural Networks for each position"""
def __init__(self, dim, hidden_dim, dropout=0.):
super().__init__()
self.fc1 = nn.Linear(dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
# (B, S, D) -> (B, S, D_ff) -> (B, S, D)
return self.dropout(self.fc2(self.dropout(F.gelu(self.fc1(x)))))
class Attention(nn.Module):
def __init__(self, dim, heads=8, dim_head=64, dropout=0., apply_transform=False, transform_scale=True, knn_attention=0.7):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.apply_transform = apply_transform
self.knn_attention = bool(knn_attention)
self.topk = knn_attention
if apply_transform:
self.reatten_matrix = torch.nn.Conv2d(heads, heads, 1, 1)
self.var_norm = torch.nn.BatchNorm2d(heads)
self.reatten_scale = self.scale if transform_scale else 1.0
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
self.scores = None
def forward(self, x):
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
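        # Note (added): kNN attention keeps the top int(topk * n) key scores
        # per query and masks the rest to -inf before the softmax, so each
        # token attends only to its most similar tokens.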
if self.knn_attention:
mask = torch.zeros(b, self.heads, n, n, device=x.device, requires_grad=False)
index = torch.topk(dots, k=int(dots.size(-1)*self.topk), dim=-1, largest=True)[1]
mask.scatter_(-1, index, 1.)
dots = torch.where(mask > 0, dots, torch.full_like(dots, float('-inf')))
attn = dots.softmax(dim=-1)
if self.apply_transform:
attn = self.var_norm(self.reatten_matrix(attn)) * self.reatten_scale
self.scores = attn
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
| 3,442 | 30.87963 | 126 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DTN.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from torch.autograd import Variable
from torch import nn, einsum
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, helpers, DropPath
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import numpy as np
import random, math
from .utils import *
from .trans_module import *
np.random.seed(123)
random.seed(123)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0., apply_transform=False, knn_attention=0.7):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout,
apply_transform=apply_transform, knn_attention=knn_attention)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class MultiScaleTransformerEncoder(nn.Module):
def __init__(self, args, small_dim=1024, small_depth=4, small_heads=8, small_dim_head=64, hidden_dim_small=768,
media_dim=1024, media_depth=4, media_heads=8, media_dim_head=64, hidden_dim_media=768,
large_dim=1024, large_depth=4, large_heads=8, large_dim_head=64, hidden_dim_large=768,
dropout=0.):
super().__init__()
self.transformer_enc_small = Transformer(small_dim, small_depth, small_heads, small_dim_head,
mlp_dim=hidden_dim_small, dropout=dropout, knn_attention=args.knn_attention)
self.transformer_enc_media = Transformer(media_dim, media_depth, media_heads, media_dim_head,
mlp_dim=hidden_dim_media, dropout=dropout, knn_attention=args.knn_attention)
self.transformer_enc_large = Transformer(large_dim, large_depth, large_heads, large_dim_head,
mlp_dim=hidden_dim_large, dropout=dropout, knn_attention=args.knn_attention)
self.Mixed_small = TemporalInceptionModule(512, [160,112,224,24,64,64], 'Mixed_small')
self.Mixed_media = TemporalInceptionModule(512, [160,112,224,24,64,64], 'Mixed_media')
self.Mixed_large = TemporalInceptionModule(512, [160, 112, 224, 24, 64, 64], 'Mixed_large')
self.MaxPool = MaxPool3dSamePadding(kernel_size=[3, 1, 1], stride=(1, 1, 1), padding=0)
self.class_embedding = None
def forward(self, xs, xm, xl, Local_flag=False):
# Local Modeling
if Local_flag:
cls_small = xs[:, 0]
xs = self.Mixed_small(xs[:, 1:, :].permute(0, 2, 1).view(xs.size(0), xs.size(-1), -1, 1, 1))
xs = self.MaxPool(xs)
xs = torch.cat((cls_small.unsqueeze(1), xs.view(xs.size(0), xs.size(1), -1).permute(0, 2, 1)), dim=1)
cls_media = xm[:, 0]
xm = self.Mixed_media(xm[:, 1:, :].permute(0, 2, 1).view(xm.size(0), xm.size(-1), -1, 1, 1))
xm = self.MaxPool(xm)
xm = torch.cat((cls_media.unsqueeze(1), xm.view(xm.size(0), xm.size(1), -1).permute(0, 2, 1)), dim=1)
cls_large = xl[:, 0]
xl = self.Mixed_large(xl[:, 1:, :].permute(0, 2, 1).view(xl.size(0), xl.size(-1), -1, 1, 1))
xl = self.MaxPool(xl)
xl = torch.cat((cls_large.unsqueeze(1), xl.view(xl.size(0), xl.size(1), -1).permute(0, 2, 1)), dim=1)
# Global Modeling
xs = self.transformer_enc_small(xs)
xm = self.transformer_enc_media(xm)
xl = self.transformer_enc_large(xl)
self.class_embedding = xs[:, 0] + xm[:, 0] + xl[:, 0]
return xs, xm, xl
class RCMModule(nn.Module):
def __init__(self, args, dim_head=64, method='New', merge='GAP'):
super(RCMModule, self).__init__()
self.merge = merge
self.heads = args.SEHeads
self.inp_dim = args.sample_duration
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.avg_pool3d = nn.AdaptiveAvgPool3d((None, 1, None))
# Self Attention Layers
self.q = nn.Linear(self.inp_dim, dim_head * self.heads, bias=False)
self.k = nn.Linear(self.inp_dim, dim_head * self.heads, bias=False)
self.scale = dim_head ** -0.5
self.method = method
if method == 'Ori':
self.norm = nn.LayerNorm(128)
self.project = nn.Sequential(
nn.Linear(self.inp_dim, 512, bias=False),
nn.GELU(),
nn.Linear(512, 512, bias=False),
nn.LayerNorm(512)
)
elif method == 'New':
if args.dataset == 'THU':
hidden_dim = 128
else:
hidden_dim = 256
self.project = nn.Sequential(
nn.Linear(self.inp_dim, hidden_dim, bias=False),
nn.GELU(),
nn.Linear(hidden_dim, self.inp_dim, bias=False),
nn.LayerNorm(self.inp_dim),
)
self.linear = nn.Linear(self.inp_dim, 512)
# init.kaiming_uniform_(self.linear, a=math.sqrt(5))
if self.heads > 1:
self.mergefc = nn.Sequential(
nn.Dropout(0.4),
nn.Linear(512 * self.heads, 512, bias=False),
nn.LayerNorm(512)
)
def forward(self, x):
b, c, t = x.shape
inp = x.clone()
# Sequence (Y) direction
xd_weight = self.project(self.avg_pool(inp.permute(0, 2, 1)).view(b, -1))
xd_weight = torch.sigmoid(xd_weight).view(b, -1, 1)
# Feature (X) direction
q, k = self.q(x), self.k(x)
q, k = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), [q, k])
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
if self.merge == 'mean':
dots = dots.mean(dim=2)
elif self.merge == 'GAP':
dots = self.avg_pool3d(dots).squeeze()
if self.heads > 1:
dots = dots.view(b, -1)
dots = self.mergefc(dots)
else:
dots = dots.squeeze()
y = torch.sigmoid(dots).view(b, c, 1)
if self.method == 'Ori':
out = x * (y.expand_as(x) + xd_weight.expand_as(x))
visweight = xd_weight # for visualization
return out, xd_weight, visweight
elif self.method == 'New':
weight = einsum('b i d, b j d -> b i j', xd_weight, y)
out = x * weight.permute(0, 2, 1)
visweight = weight # for visualization
return out, self.linear(xd_weight.squeeze()), visweight
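# Note (added): this RCM variant recalibrates in two directions: a sequence
# (Y) weight from an MLP over temporally pooled features and a feature (X)
# weight from multi-head self-attention; 'New' combines them as an outer
# product, 'Ori' as a sum of the two expanded weights.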
class DTNNet(nn.Module):
def __init__(self, args, num_classes=249, small_dim=512, media_dim=512, large_dim=512,
small_depth=1, media_depth=1, large_depth=1,
heads=8, pool='cls', dropout=0.1, emb_dropout=0.0, branch_merge='pool',
init: bool = False,
warmup_temp_epochs: int = 30):
super().__init__()
self.low_frames = args.sample_duration // args.intar_fatcer
self.media_frames = self.low_frames + args.sample_duration // args.intar_fatcer
self.high_frames = self.media_frames + args.sample_duration // args.intar_fatcer
print('Temporal Resolution:', self.low_frames, self.media_frames, self.high_frames)
self.branch_merge = branch_merge
self._args = args
warmup_temp, temp = map(float, args.temp)
multi_scale_enc_depth = args.N
num_patches_small = self.low_frames
num_patches_media = self.media_frames
num_patches_large = self.high_frames
self.pos_embedding_small = nn.Parameter(torch.randn(1, num_patches_small + 1, small_dim))
self.cls_token_small = nn.Parameter(torch.randn(1, 1, small_dim))
self.dropout_small = nn.Dropout(emb_dropout)
# trunc_normal_(self.pos_embedding_small, std=.02)
# trunc_normal_(self.cls_token_small, std=.02)
self.pos_embedding_media = nn.Parameter(torch.randn(1, num_patches_media + 1, media_dim))
self.cls_token_media = nn.Parameter(torch.randn(1, 1, media_dim))
self.dropout_media = nn.Dropout(emb_dropout)
# trunc_normal_(self.pos_embedding_media, std=.02)
# trunc_normal_(self.cls_token_media, std=.02)
self.pos_embedding_large = nn.Parameter(torch.randn(1, num_patches_large + 1, large_dim))
self.cls_token_large = nn.Parameter(torch.randn(1, 1, large_dim))
self.dropout_large = nn.Dropout(emb_dropout)
# trunc_normal_(self.pos_embedding_large, std=.02)
# trunc_normal_(self.cls_token_large, std=.02)
self.multi_scale_transformers = nn.ModuleList([])
for _ in range(multi_scale_enc_depth):
self.multi_scale_transformers.append(
MultiScaleTransformerEncoder(args, small_dim=small_dim, small_depth=small_depth,
small_heads=heads,
media_dim=media_dim, media_depth=media_depth,
media_heads=heads,
large_dim=large_dim, large_depth=large_depth,
large_heads=heads,
dropout=dropout))
self.pool = pool
# self.to_latent = nn.Identity()
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.max_pool = nn.AdaptiveMaxPool1d(1)
if self._args.recoupling:
self.rcm = RCMModule(args)
if args.Network != 'FusionNet':
self.mlp_head_small = nn.Sequential(
nn.LayerNorm(small_dim),
nn.Dropout(self._args.drop),
nn.Linear(small_dim, num_classes),
)
self.mlp_head_media = nn.Sequential(
nn.LayerNorm(media_dim),
nn.Dropout(self._args.drop),
nn.Linear(media_dim, num_classes),
)
self.mlp_head_large = nn.Sequential(
nn.LayerNorm(large_dim),
nn.Dropout(self._args.drop),
nn.Linear(large_dim, num_classes),
)
self.show_res = Rearrange('b t (c p1 p2) -> b t c p1 p2', p1=int(small_dim ** 0.5), p2=int(small_dim ** 0.5))
self.temp_schedule = np.concatenate((
np.linspace(warmup_temp,
temp, warmup_temp_epochs),
np.ones(args.epochs - warmup_temp_epochs) * temp
))
if init:
self.init_weights()
self.trans_feature = None
if self._args.temporal_consist:
self.TCMLP = nn.Sequential(
nn.ReLU(),
nn.Linear(small_dim, 1024),
nn.Dropout(0.1)
)
self.temporal_reduce = nn.Conv2d(in_channels=num_patches_small + num_patches_media + num_patches_large,
out_channels=num_patches_media,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.t_conv_group = nn.Sequential(
nn.ConvTranspose2d( in_channels=num_patches_media, out_channels=num_patches_media//2, stride=2, kernel_size=3, padding=1, output_padding=1,
dilation=1, padding_mode="zeros", bias=False ),
nn.BatchNorm2d(num_patches_media//2),
nn.ReLU(),
nn.ConvTranspose2d( in_channels=num_patches_media//2, out_channels=3, stride=2, kernel_size=3, padding=1, output_padding=1,
dilation=1, padding_mode="zeros", bias=False ),
nn.BatchNorm2d(3),
nn.ReLU(),
)
else:
self.tc_feat = None
def get_trans_feature(self):
return self.trans_feature
def TC_forward(self):
return self.tc_feat
# @torch.no_grad()
def init_weights(self):
def _init(m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(
m.weight) # _trunc_normal(m.weight, std=0.02) # from .initialization import _trunc_normal
if hasattr(m, 'bias') and m.bias is not None:
nn.init.normal_(m.bias, std=1e-6) # nn.init.constant(m.bias, 0)
self.apply(_init)
# ----------------------------------
    # frame sampling function
# ----------------------------------
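    # Note (added): f(n, sn) splits n frames into sn uniform segments and draws
    # one random index from each segment (TSN-style segment sampling).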
    def f(self, n, sn):
        SL = lambda n, sn: [
            (lambda n, arr: n if arr == [] else random.choice(arr))(
                n * i / sn,
                range(int(n * i / sn), max(int(n * i / sn) + 1, int(n * (i + 1) / sn)))
            )
            for i in range(sn)
        ]
        return SL(n, sn)
def forward(self, img): # img size: [2, 64, 1024]
# ----------------------------------
# Recoupling:
# ----------------------------------
if self._args.recoupling:
img, spatial_weights, visweight = self.rcm(img.permute(0, 2, 1))
img = img.permute(0, 2, 1)
else:
visweight = img
# ----------------------------------
sl_low = self.f(img.size(1), self.low_frames)
xs = img[:, sl_low, :]
b, n, _ = xs.shape
cls_token_small = repeat(self.cls_token_small, '() n d -> b n d', b=b)
xs = torch.cat((cls_token_small, xs), dim=1)
xs += self.pos_embedding_small[:, :(n + 1)]
xs = self.dropout_small(xs)
# ----------------------------------
sl_media = self.f(img.size(1), self.media_frames)
xm = img[:, sl_media, :]
b, n, _ = xm.shape
cls_token_media = repeat(self.cls_token_media, '() n d -> b n d', b=b)
xm = torch.cat((cls_token_media, xm), dim=1)
xm += self.pos_embedding_media[:, :(n + 1)]
xm = self.dropout_media(xm)
# ----------------------------------
sl_high = self.f(img.size(1), self.high_frames)
xl = img[:, sl_high, :]
b, n, _ = xl.shape
cls_token_large = repeat(self.cls_token_large, '() n d -> b n d', b=b)
xl = torch.cat((cls_token_large, xl), dim=1)
xl += self.pos_embedding_large[:, :(n + 1)]
xl = self.dropout_large(xl)
# ----------------------------------
# Temporal Multi-scale features learning
# ----------------------------------
Local_flag = True
for multi_scale_transformer in self.multi_scale_transformers:
xs, xm, xl = multi_scale_transformer(xs, xm, xl, Local_flag)
Local_flag = False
self.trans_feature = xm[:, 1:]
if self._args.temporal_consist:
tc_feat = self.TCMLP(torch.cat((xs[:, 1:], xm[:, 1:], xl[:, 1:]), dim=1)) #[b, s+m+l, 1024]
tc_feat = rearrange(tc_feat, 'b n (h w) -> b n h w', h=int(tc_feat.size(-1) ** 0.5))
tc_feat = self.temporal_reduce(tc_feat)
self.tc_feat = self.t_conv_group(tc_feat)
xs = xs.mean(dim=1) if self.pool == 'mean' else xs[:, 0]
xm = xm.mean(dim=1) if self.pool == 'mean' else xm[:, 0]
xl = xl.mean(dim=1) if self.pool == 'mean' else xl[:, 0]
if self._args.recoupling:
T = self._args.temper
distillation_loss = F.kl_div(F.log_softmax(spatial_weights.squeeze() / T, dim=-1),
F.softmax(((xs + xm + xl) / 3.).detach() / T, dim=-1),
reduction='sum')
else:
distillation_loss = torch.tensor(0.0).cuda()
if self._args.Network != 'FusionNet':
if self._args.sharpness:
temp = self.temp_schedule[self._args.epoch]
xs = self.mlp_head_small(xs) / temp
xm = self.mlp_head_media(xm) / temp
xl = self.mlp_head_large(xl) / temp
else:
xs = self.mlp_head_small(xs)
xm = self.mlp_head_media(xm)
xl = self.mlp_head_large(xl)
if self.branch_merge == 'sum':
x = xs + xm + xl
elif self.branch_merge == 'pool':
x = self.max_pool(torch.cat((xs.unsqueeze(2), xm.unsqueeze(2), xl.unsqueeze(2)), dim=-1)).squeeze()
# ---------------------------------
# Get score from multi-branch Trans for visualization
# ---------------------------------
scores_small = self.multi_scale_transformers[2].transformer_enc_small.layers[-1][0].fn.scores
scores_media = self.multi_scale_transformers[2].transformer_enc_media.layers[-1][0].fn.scores
scores_large = self.multi_scale_transformers[2].transformer_enc_large.layers[-1][0].fn.scores
# resize attn
attn_media = scores_media.detach().clone()
attn_media.resize_(*scores_small.size())
attn_large = scores_large.detach().clone()
attn_large.resize_(*scores_small.size())
att_small = scores_small.detach().clone()
scores = torch.cat((att_small, attn_media, attn_large), dim=1) # [2, 24, 17, 17]
att_map = torch.zeros(scores.size(0), scores.size(1), scores.size(1), dtype=torch.float)
for b in range(scores.size(0)):
for i, s1 in enumerate(scores[b]):
for j, s2 in enumerate(scores[b]):
cosin_simil = torch.cosine_similarity(s1.view(1, -1), s2.view(1, -1))
att_map[b][i][j] = cosin_simil
# --------------------------------
# Measure cosine similarity of xs and xl
# --------------------------------
cosin_similar_xs_xm = torch.cosine_similarity(xs[0], xm[0], dim=-1)
cosin_similar_xs_xl = torch.cosine_similarity(xs[0], xl[0], dim=-1)
cosin_similar_xm_xl = torch.cosine_similarity(xm[0], xl[0], dim=-1)
cosin_similar_sum = cosin_similar_xs_xm + cosin_similar_xs_xl + cosin_similar_xm_xl
return (x, xs, xm, xl), distillation_loss, (att_map, cosin_similar_sum.cpu(),
                                                    (scores_small[0], scores_media[0], scores_large[0]), visweight[0])
| 18,908 | 42.87239 | 159 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DSN_Fusion.py | '''
This file is modified from:
https://github.com/deepmind/kinetics-i3d/blob/master/i3d.py
'''
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import os, math
import sys
from .DTN import DTNNet
from .FRP import FRP_Module
from .utils import *
sys.path.append('../../')
from collections import OrderedDict
from utils import load_pretrained_checkpoint
import logging
class DSNNet(nn.Module):
VALID_ENDPOINTS = (
'Conv3d_1a_7x7',
'MaxPool3d_2a_3x3',
'Conv3d_2b_1x1',
'Conv3d_2c_3x3',
'MaxPool3d_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool3d_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'MaxPool3d_5a_2x2',
'Mixed_5b',
'Mixed_5c'
)
def __init__(self, args, num_classes=400, spatial_squeeze=True, name='inception_i3d', in_channels=3, dropout_keep_prob=0.5,
pretrained: str = False):
super(DSNNet, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self.logits = None
self.args = args
self.end_points = {}
'''
Low Level Features Extraction
'''
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[1, 7, 7],
stride=(1, 2, 2), padding=(0, 3, 3), name=name + end_point)
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0,
name=name + end_point)
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[1, 3, 3],
padding=(0, 1, 1),
name=name + end_point)
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
'''
Spatial Multi-scale Features Learning
'''
end_point = 'Mixed_3b'
self.end_points[end_point] = SpatialInceptionModule(192, [64, 96, 128, 16, 32, 32], name + end_point)
end_point = 'Mixed_3c'
self.end_points[end_point] = SpatialInceptionModule(256, [128, 128, 192, 32, 96, 64], name + end_point)
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
end_point = 'Mixed_4b'
self.end_points[end_point] = SpatialInceptionModule(128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], name + end_point)
end_point = 'Mixed_4c'
self.end_points[end_point] = SpatialInceptionModule(192 + 208 + 48 + 64, [160, 112, 224, 24, 64, 64], name + end_point)
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 2, 2], stride=(1, 2, 2),
padding=0)
end_point = 'Mixed_5b'
self.end_points[end_point] = SpatialInceptionModule(160 + 224 + 64 + 64, [256, 160, 320, 32, 128, 128],
name + end_point)
end_point = 'Mixed_5c'
self.end_points[end_point] = SpatialInceptionModule(256 + 320 + 128 + 128, [384, 192, 384, 48, 128, 128],
name + end_point)
self.LinearMap = nn.Sequential(
nn.LayerNorm(1024),
nn.Linear(1024, 512),
)
self.avg_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.build()
self.dtn = DTNNet(args, num_classes=self._num_classes)
self.rrange = Rearrange('b c t h w -> b t c h w')
if args.frp:
self.frp_module = FRP_Module(w=args.w, inplanes=64)
if pretrained:
load_pretrained_checkpoint(self, pretrained)
def build(self):
for k in self.end_points.keys():
self.add_module(k, self.end_points[k])
def forward(self, x=None, garr=None, endpoint=None):
if endpoint == 'spatial':
for end_point in self.VALID_ENDPOINTS:
if end_point in self.end_points:
if end_point in ['Mixed_3b']:
x = self._modules[end_point](x)
if self.args.frp:
x = self.frp_module(x, garr) + x
elif end_point in ['Mixed_4b']:
x = self._modules[end_point](x)
if self.args.frp:
x = self.frp_module(x, garr) + x
f = x
elif end_point in ['Mixed_5b']:
x = self._modules[end_point](x)
if self.args.frp:
x = self.frp_module(x, garr) + x
else:
x = self._modules[end_point](x)
x = self.avg_pool(x).view(x.size(0), x.size(1), -1).permute(0, 2, 1)
x = self.LinearMap(x)
return x
else:
logits, distillation_loss, (att_map, cosin_similar, MHAS, visweight) = self.dtn(x)
return logits, distillation_loss, (att_map, cosin_similar, MHAS, visweight)
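# Usage sketch (hypothetical; `args` must supply the fields consumed above and
# inside DTNNet/FRP_Module, e.g. args.frp and args.w):
#   model = DSNNet(args, num_classes=249, in_channels=3)
#   tokens = model(x=rgb_clip, garr=None, endpoint='spatial')  # [B, T, 512] spatial tokens
#   logits, distill_loss, extras = model(x=tokens)             # DTN classification branch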
| 5,925 | 36.27044 | 127 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/models.py | """
This file is modified from:
https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/deit.py
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.nn as nn
from functools import partial
from einops import rearrange, repeat
import torch.nn.functional as nnf
from torchvision.utils import save_image, make_grid
import numpy as np
import cv2
import random
random.seed(123)
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, helpers, DropPath
from timm.models.resnet import Bottleneck, ResNet
from timm.models.resnet import _cfg as _cfg_resnet
from timm.models.helpers import build_model_with_cfg
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384',
]
def TokensCutOff(x, tua = 0.4):
    """Token-level cutoff: suppress each patch token (scaled to ~0) with probability `tua`; the CLS and distillation tokens are always kept."""
CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)
tokens = x[:, 2:, :]
B, N, C = tokens.shape
mask = torch.ones(B, N, requires_grad=False).cuda()
prob = torch.rand(B, N, requires_grad=False).cuda()
mask = torch.where(prob > tua, mask, torch.full_like(mask, 1e-8))
TokenMask = mask.view(B, N, 1).expand_as(tokens)
x = tokens * TokenMask
x = torch.cat((CLS, DIS, x), dim=1)
return x
def FeatureCutOff(x, tua = 0.4):
    """Channel-level cutoff: suppress each embedding channel with probability `tua` across all patch tokens; the CLS and distillation tokens are untouched."""
CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)
tokens = x[:, 2:, :]
B, N, C = tokens.shape
mask = torch.ones(B, C, requires_grad=False).cuda()
prob = torch.rand(B, C, requires_grad=False).cuda()
mask = torch.where(prob > tua, mask, torch.full_like(mask, 1e-8))
TokenMask = mask.view(B, 1, C).expand_as(tokens)
x = tokens * TokenMask
x = torch.cat((CLS, DIS, x), dim=1)
return x
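# Example (a sketch): both cutoffs expect a CUDA token sequence [B, 2 + N, C]
# (the masks are created with .cuda()); with tua=0.4, each patch token /
# channel is suppressed with probability 0.4:
#   x = torch.randn(4, 2 + 196, 192, device='cuda')
#   x = TokensCutOff(x, tua=0.4)
#   x = FeatureCutOff(x, tua=0.4)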
def shuffle_unit(features, shift, group, begin=0, return_idex=False):
    """Cyclically shift the token sequence by `shift`, then interleave it into `group` groups (patch shuffle); optionally return the permuted source indices."""
batchsize = features.size(0)
dim = features.size(-1)
labels = torch.arange(0, features.size(-2), 1, device=features.device).expand(batchsize, -1)
# Shift Operation
feature_random = torch.cat([features[:, begin-1+shift:], features[:, begin:begin-1+shift]], dim=1)
labels = torch.cat([labels[:, begin-1+shift:], labels[:, begin:begin-1+shift]], dim=1)
x = feature_random
# Patch Shuffle Operation
x = x.view(batchsize, group, -1, dim)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batchsize, -1, dim)
labels = labels.view(batchsize, group, -1, 1)
labels = torch.transpose(labels, 1, 2).contiguous()
labels = labels.view(batchsize, -1)
if return_idex:
return x, labels
return x
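# Example (a sketch): shuffle 8 tokens with shift=3 in 2 groups and recover the
# source positions; y[b, i] == x[b, labels[b, i]] holds for every position:
#   x = torch.randn(2, 8, 192)
#   y, labels = shuffle_unit(x, shift=3, group=2, return_idex=True)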
def random_shuffle_unit(features, return_idex=False, batch_premutation=False, sort_label=None):
    """Randomly permute the token sequence: per-sample permutations if `batch_premutation`, a shared permutation otherwise, or permutations drawn from `sort_label` if given."""
if sort_label:
B, N, C = features.shape
labels = []
perms_idx = []
for b in range(B):
perm_idx = random.choice(list(sort_label.keys()))
label = sort_label[perm_idx]
perms_idx.append(perm_idx + b * N)
labels.append(label)
perms_idx = torch.cat(perms_idx)
x = features.contiguous().view(-1, C)
x = x[perms_idx, :]
x = x.view(B, N, C)
if return_idex:
return x, torch.tensor(labels, device=features.device), perms_idx
if batch_premutation:
B, N, C = features.shape
labels = torch.arange(0, N, 1, device=features.device)
# labels = (labels - labels.min())/(labels.max() - labels.min()) + 1e-8
labels = labels.expand(B, -1)
# perturbation = torch.rand([B, N], device=features.device) - torch.rand([B, N], device=features.device)
# labels = labels + perturbation
index = torch.cat([torch.randperm(N) + b * N for b in range(B)], dim=0)
x = features.contiguous().view(-1, C)
x = x[index, :]
x = x.view(B, N, C)
labels = labels.contiguous().view(-1)[index].view(B, -1)
else:
batchsize = features.size(0)
dim = features.size(-1)
num_patch = features.size(-2)
labels = torch.arange(0, features.size(-2), 1, device=features.device)
# labels = (labels - labels.min())/(labels.max() - labels.min()) + 1e-8
labels = labels.expand(batchsize, -1)
# perturbation = torch.rand([B, N]) - torch.rand([B, N])
# labels = labels + perturbation
index = torch.randperm(features.size(-2))
labels = labels[:, index]
x = features[:, index, :]
if return_idex:
return x, labels, index
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn = None
def forward(self, x):
xori = x
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
self.attn = attn
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def get_attn(self):
return self.attn
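# Note: Attention caches the softmax attention map of its most recent forward
# pass in `self.attn` (exposed via get_attn) for visualization; it is
# overwritten on every call.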
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = helpers.to_2tuple(drop)
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class Block(nn.Module):
def __init__(self, args, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class DistilledVisionTransformer(VisionTransformer):
def __init__(self, *args, **kwargs):
self._args = kwargs['args']
del kwargs['args']
super().__init__(*args, **kwargs)
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
# dpr = [x.item() for x in torch.linspace(0, kwargs['drop_path_rate'], kwargs['depth'])] # stochastic depth decay rule
# self.blocks = nn.Sequential(*[
# Block(
# dim=kwargs['embed_dim'], num_heads=kwargs['num_heads'], mlp_ratio=kwargs['mlp_ratio'], qkv_bias=kwargs['qkv_bias'], drop=kwargs['drop_rate'],
# attn_drop=kwargs['attn_drop_rate'], drop_path=dpr[i], norm_layer=kwargs['norm_layer'], act_layer=kwargs['act_layer'])
# for i in range(kwargs['depth'])])
trunc_normal_(self.dist_token, std=.02)
trunc_normal_(self.pos_embed, std=.02)
self.head_dist.apply(self._init_weights)
self.shuffle = self._args.shuffle
self.Token_cutoff = self._args.Token_cutoff
self.tua_token = self._args.tua_token
self.Feature_cutoff = self._args.Feature_cutoff
self.tua_feature = self._args.tua_feature
def forward_features(self, x):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add the dist_token
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
if self.shuffle and self.training:
CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)
x = shuffle_unit(x[:, 2:, :], shift=8, group=2)
x = torch.cat((CLS, DIS, x), dim=1)
for blk in self.blocks:
x = blk(x)
if self.Token_cutoff and self.training:
x = TokensCutOff(x, self.tua_token)
if self.Feature_cutoff and self.training:
x = FeatureCutOff(x, self.tua_feature)
x = self.norm(x)
return x[:, 0], x[:, 1]
def forward(self, x):
x, x_dist = self.forward_features(x)
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.training:
return x, x_dist
else:
# during inference, return the average of both classifier predictions
return (x + x_dist) / 2
class Video2Image(nn.Module):
def __init__(self, inp_channel=16):
super(Video2Image, self).__init__()
# self.MLP = nn.Sequential(
# # input: [B, N, C]
# nn.Linear(C, C//2),
# nn.ReLU(),
# nn.Linear(C//2, C)
# )
self.channel1 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)
self.channel2 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)
self.channel3 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)
self.bn = nn.BatchNorm2d(3)
self.relu = nn.ReLU(inplace=False)
self.channel1_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.channel2_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.channel3_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reverse = nn.BatchNorm3d(3)
self.relu_reverse = nn.ReLU(inplace=False)
self.compressed = None
def get_compressed_img(self):
return self.compressed
def forward(self, x):
B, C, T, H, W = x.shape
# x = rearrange(x, 'b c t h w -> (b c) t h w)')
x_channel1 = self.channel1(x[:, 0, :, :, :])
x_channel2 = self.channel2(x[:, 1, :, :, :])
x_channel3 = self.channel3(x[:, 2, :, :, :])
x = torch.cat((x_channel1, x_channel2, x_channel3), dim=1)
x = self.relu(self.bn(x))
self.compressed = x
x_channel1_reverse = self.channel1_reverse(x[:, 0, :, :].unsqueeze(1))
x_channel2_reverse = self.channel2_reverse(x[:, 1, :, :].unsqueeze(1))
x_channel3_reverse = self.channel3_reverse(x[:, 2, :, :].unsqueeze(1))
x_reverse = torch.cat((x_channel1_reverse.unsqueeze(1), x_channel2_reverse.unsqueeze(1), x_channel3_reverse.unsqueeze(1)), dim=1)
x_reverse = self.relu_reverse(self.bn_reverse(x_reverse))
return x, x_reverse
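# Shape sketch for Video2Image (assuming inp_channel == T, the clip length):
#   input x:    [B, 3, T, H, W]
#   compressed: [B, 3, H, W]     (each RGB channel pooled over T by a 1x1 Conv2d)
#   x_reverse:  [B, 3, T, H, W]  (per-channel expansion back to T frames)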
class VisionTransformer(VisionTransformer):  # intentionally shadows the imported timm VisionTransformer
def __init__(self, *args, **kwargs):
self._args = kwargs['args']
del kwargs['args']
super().__init__(*args, **kwargs)
dpr = [x.item() for x in torch.linspace(0, kwargs['drop_path_rate'], kwargs['depth'])] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
Block(
self._args, dim=kwargs['embed_dim'], num_heads=kwargs['num_heads'], mlp_ratio=kwargs['mlp_ratio'], qkv_bias=kwargs['qkv_bias'], drop=kwargs['drop_rate'],
drop_path=dpr[i], norm_layer=kwargs['norm_layer'])
for i in range(kwargs['depth'])])
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches+1, self.embed_dim))
trunc_normal_(self.pos_embed, std=.02)
self.video2Img = Video2Image(self._args.sample_duration)
def get_cls_token(self):
return self.CLSToken
def get_patch_token(self):
return self.PatchToken
def forward_features(self, x):
x = self.patch_embed(x)
cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_token, x), dim=1)
x = self.pos_drop(x + self.pos_embed)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
self.PatchToken = x[:, 1:]
return self.pre_logits(x[:, 0])
def forward(self, x):
# x.size: torch.Size([16, 3, 16, 224, 224])
x, x_reverse = self.video2Img(x)
x = self.forward_features(x)
self.CLSToken = x
x = self.head(x)
return x, x_reverse
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
def _create_resnet(variant, pretrained=False, **kwargs):
del kwargs['args']
return build_model_with_cfg(ResNet, variant, pretrained, default_cfg=_cfg_resnet(), **kwargs)
@register_model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
model = _create_resnet('resnet50', pretrained, **model_args)
# model.default_cfg = _cfg_resnet()
return model
@register_model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
model = _create_resnet('resnet101', pretrained, **model_args)
return model | 19,803 | 37.984252 | 169 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/model_ema.py | """
This file is modified from:
https://github.com/rwightman/pytorch-image-models/blob/main/timm/utils/model_ema.py
Exponential Moving Average (EMA) of model updates
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
_logger = logging.getLogger(__name__)
class ModelEma(nn.Module):
""" Model Exponential Moving Average (DEPRECATED)
Keep a moving average of everything in the model state_dict (parameters and buffers).
This version is deprecated, it does not work with scripted models. Will be removed eventually.
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device='', resume=''):
super(ModelEma, self).__init__()
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
try:
# self._load_checkpoint(resume)
checkpoint = torch.load(resume, map_location='cpu')
# self.ema.load_state_dict(checkpoint['model_ema'])
new_state_dict = OrderedDict()
for k, v in checkpoint['model_ema'].items():
                    name = k[4:]  # drop the 4-character prefix (assumed 'ema.') from saved EMA keys
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
            except Exception:
checkpoint = torch.load(resume, map_location='cpu')
# self.ema.load_state_dict(checkpoint['model_ema'])
new_state_dict = OrderedDict()
for k, v in checkpoint['model'].items():
name = k
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
print("Loaded state_dict_ema")
for p in self.ema.parameters():
p.requires_grad_(False)
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
assert isinstance(checkpoint, dict)
if 'state_dict_ema' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict_ema'].items():
# ema model may have been wrapped by DataParallel, and need module prefix
if self.ema_has_module:
name = 'module.' + k if not k.startswith('module') else k
else:
name = k
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
_logger.info("Loaded state_dict_ema")
else:
_logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
def update(self, model):
# correct a mismatch in state dict keys
needs_module = hasattr(model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = model.state_dict()
for k, ema_v in self.ema.state_dict().items():
if needs_module:
k = 'module.' + k
model_v = msd[k].detach()
if self.device:
model_v = model_v.to(device=self.device)
ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
def forward(self, x):
return self.ema(x)
class ModelEmaV2(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler, it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device=None, resume=None):
super(ModelEmaV2, self).__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def _update(self, model, update_fn):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def update(self, model):
self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
def set(self, model):
self._update(model, update_fn=lambda e, m: m)
def forward(self, x):
return self.module(x)
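def _ema_v2_usage_sketch():  # pragma: no cover
    """Minimal usage sketch of ModelEmaV2 (an illustration, not part of the original file)."""
    model = nn.Linear(4, 2)
    ema = ModelEmaV2(model, decay=0.999)
    # inside the training loop, after each optimizer.step():
    ema.update(model)
    # evaluate with the smoothed weights:
    return ema(torch.randn(1, 4))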
| 6,797 | 42.858065 | 102 | py |
DMH-Net | DMH-Net-main/visualization_from_json.py | import argparse
import json
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image
from matplotlib.figure import Figure
from tqdm import trange
from e2plabel.e2plabelconvert import generatePerspective, VIEW_NAME, VIEW_ARGS
from postprocess.postprocess2 import _cal_p_pred_emask
from visualization import clearAxesLines
img_hw = (512, 512)
e_img_hw = (512, 1024)
def jsonToCor(filename):
H, W = e_img_hw
with open(filename) as f:
inferenced_result = json.load(f)
cor_id = np.array(inferenced_result['uv'], np.float32)
cor_id[:, 0] *= W
cor_id[:, 1] *= H
return cor_id
def txtToCor(filename):
with open(filename) as f:
cor = np.array([line.strip().split() for line in f if line.strip()], np.float32)
return cor
def resolveImgPath(s: str):
if s.find("pano") == 0 or s.find("camera") == 0:
return os.path.join("data/layoutnet_dataset/test/img", s)
else:
return os.path.join("data/matterport3d_layout/test/img", s)
def resolveGtCorPath(s: str):
if s.find("pano") == 0 or s.find("camera") == 0:
return os.path.join("data/layoutnet_dataset/test/label_cor", os.path.splitext(s)[0] + ".txt")
else:
return os.path.join("data/matterport3d_layout/test/label_cor", os.path.splitext(s)[0] + ".txt")
def corTo2DMask(e_img, cor):
pres = generatePerspective(e_img, cor, VIEW_NAME, VIEW_ARGS, img_hw)
lines = []
for d in pres:
lines.append(torch.tensor(d["lines"]))
masks2d = []
for view_idx in range(6):
thickness = int(round(img_hw[0] * 0.01))
mat = np.zeros((1, *img_hw))
for line in lines[view_idx]:
cv2.line(mat[0], torch.round(line[3:5]).to(torch.int64).numpy(),
torch.round(line[5:7]).to(torch.int64).numpy(), 1.0, thickness=thickness)
masks2d.append(torch.tensor(mat))
masks2d = torch.stack(masks2d)
maskEq = _cal_p_pred_emask(None, masks2d, img_hw, e_img_hw)
return maskEq
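# corTo2DMask rasterizes the layout wireframe onto the 6 cube faces and then
# reprojects the per-face masks back to equirectangular space via
# _cal_p_pred_emask, yielding a line mask at e_img_hw resolution.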
def wireframeGetMaskImg(e_img, cor, color) -> torch.Tensor:
maskEq = corTo2DMask(e_img, cor).squeeze()
mask_img = torch.cat([torch.tensor(color).repeat(*maskEq.shape[0:2], 1), maskEq.unsqueeze(-1)], 2)
mask_img = torch.round(mask_img * 255).to(torch.uint8)
return mask_img
def drawWireframeOnEImg(e_img, cor, color):
plt.imshow(wireframeGetMaskImg(e_img, cor, color).cpu().numpy())
fig: Figure = None
def show(output_path, name):
if output_path:
plt.savefig(os.path.join(output_path, imgPath + "." + name + ".png"))
else:
plt.show()
plt.close(fig)
def initFig():
global fig
fig = plt.figure(figsize=(10.24, 5.12))
plt.gcf().subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
ax = plt.gca()
clearAxesLines(ax)
CLASS_A = [
"7y3sRwLe3Va_9b72664399a34e4f9dbe470571c73187.png",
"B6ByNegPMKs_8b1abc1b47784d758b9ec1e079160475.png",
"camera_1a2b3c7901434d88bba55d6f2b28a6d5_office_30_frame_equirectangular_domain_.png",
"camera_7a42df17b40c4c15bfd6301823b6a476_office_22_frame_equirectangular_domain_.png",
"camera_8cbbb3e42c0e4e54b3b523b1fec6b3bc_office_33_frame_equirectangular_domain_.png",
"camera_412ba0d035b5432abd88ed447716f349_office_30_frame_equirectangular_domain_.png",
"camera_514bd77b98cc47ad904d6c8196f769b1_office_8_frame_equirectangular_domain_.png",
"camera_d162082c8f714aee8984195e0c5a7396_office_11_frame_equirectangular_domain_.png",
"e9zR4mvMWw7_f624a40d100144e696a39abe258ee090.png",
"pano_adxsvoaiehisue.png",
"pano_agpqpoljoyzxds.png",
"pano_ahvuapixtvirde.png",
"pano_aixninerbhvojf.png",
"pano_ankughvvgbhsom.png",
"pano_apozlylyjgtjid.png",
"uNb9QFRL6hY_1434b965c3c147419c4ff40310633b58.png",
"x8F5xyUWy9e_2669f5ba693c4e729d7d2c4f3fa0a077.png",
]
CLASS_B = [
"pano_aghlgnaxvjlzmb.png",
"7y3sRwLe3Va_92fb09a83f8949619b9dc5bda2855456.png",
"7y3sRwLe3Va_fdab6422162e49db822a37178ab70481.png",
"B6ByNegPMKs_53249ef8a94c4c40bd6f09c069e54d16.png",
"B6ByNegPMKs_bb2332e3d7ad40a59ee5ad0eae108dec.png",
"B6ByNegPMKs_ce2f5a74556c4be192df3ca7a178cefb.png",
"camera_32caf5752a4746c8b95f84e9acd9271d_office_29_frame_equirectangular_domain_.png",
"camera_63eb2cd447b84c5abac846f79c51dfcd_office_14_frame_equirectangular_domain_.png",
"camera_90af0a7fe0ed4a7db2c2e05727560231_office_15_frame_equirectangular_domain_.png",
"camera_270448008f5743f48f34539d36e4c4ae_office_14_frame_equirectangular_domain_.png",
"pano_auqcjiehbmenao.png",
"wc2JMjhGNzB_6e491bc8576345bda3cdde9ab216b7be.png",
]
CLASS_C_D = [
"7y3sRwLe3Va_9e4c92fd7eb74504baecf55a3264716e.png",
"7y3sRwLe3Va_6376b741b50a4418b3dc3fde791c3c09.png",
"B6ByNegPMKs_5b3d1c9fefb64512b0c9750a00feece4.png",
"B6ByNegPMKs_e5567bd5fa2d4fde8a6b9f15e3274a7e.png",
"e9zR4mvMWw7_5d711de78dbd400aa4cfd51fc05dfbee.png",
"pano_abbvryjplnajxo.png",
"pano_aqdafdzfhdukpg.png",
"uNb9QFRL6hY_d11f14ddecbe406681d4980365ea5a43.png",
"7y3sRwLe3Va_dd83fb40a2e14ac99de9fe9bcfaf44df.png",
"uNb9QFRL6hY_bcce4f23c12744c782c0b49b24a0331a.png",
"camera_a39f4a868cd84429a765324af21c6e6e_office_8_frame_equirectangular_domain_.png",
]
PANO_ARR = []
STF_ARR = []
MATTER_ARR = []
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--img', help='Specify a single image; if omitted, draw every image')
    parser.add_argument('--output_path', help='If omitted, show results via plt.show instead of saving')
    parser.add_argument('--draw_independent', "-d", action="store_true", help='Draw each method in its own figure instead of overlaying several methods in one figure')
    parser.add_argument('--draw_both', "-b", action="store_true", help='Draw both the combined and the independent figures')
    parser.add_argument('--second', "-2", action="store_true", help='If set, draw HoHoNet and AtlantaNet; otherwise draw HorizonNet and LayoutNet')
args = parser.parse_args()
    GT_COLOR = (0.0, 1.0, 0.0)  # green
    OUR_PATH, OUR_COLOR = "result_json", (1.0, 0.0, 0.0)  # red
    HORIZONNET_PATH, HORIZONNET_COLOR = "eval_results/horizonnet_json", (0.0, 0.0, 1.0)  # blue
    LAYOUTNET_PATH, LAYOUTNET_COLOR = "eval_results/layoutnet_json", (1.0, 0.0, 1.0)  # magenta
    ATLANTANET_PATH, ATLANTANET_COLOR = "eval_results/atlantanet_json", (0.0, 0.0, 1.0)  # blue
    HOHONET_PATH, HOHONET_COLOR = "eval_results/hohonet_json", (1.0, 0.0, 1.0)  # magenta
if args.img:
img_list = [args.img]
else:
img_list = [s.replace(".json", "") for s in os.listdir(OUR_PATH)]
# # TODO
# img_list = CLASS_A + CLASS_B + CLASS_C_D
# args.output_path = "result_6_pick"
if args.output_path:
os.makedirs(args.output_path, exist_ok=True)
for i in trange(len(img_list)):
imgPath = img_list[i]
isPanoStf = imgPath.find("pano") == 0 or imgPath.find("camera") == 0
if args.second and isPanoStf: continue
e_img = np.array(Image.open(resolveImgPath(imgPath))) / 255.0
gt_cor = txtToCor(resolveGtCorPath(imgPath))
myJsonPath = os.path.join(OUR_PATH, imgPath + ".json")
with open(myJsonPath) as f:
my_result = json.load(f)
iou3d = my_result["3DIoU"]
# # TODO
# if imgPath.find("pano") == 0:
# PANO_ARR.append(iou3d)
# elif imgPath.find("camera") == 0:
# STF_ARR.append(iou3d)
# else:
# MATTER_ARR.append(iou3d)
# continue
        # Drawing order: GT, LayoutNet, HorizonNet, ours
if args.draw_both or (not args.draw_independent):
initFig()
plt.imshow(e_img)
drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
if not args.second:
drawWireframeOnEImg(e_img, txtToCor(os.path.join(LAYOUTNET_PATH, os.path.splitext(imgPath)[0] + (
"_aligned_rgb" if isPanoStf else "") + "_cor_id.txt")), LAYOUTNET_COLOR)
drawWireframeOnEImg(e_img, jsonToCor(os.path.join(HORIZONNET_PATH, os.path.splitext(imgPath)[0] + ".json")),
HORIZONNET_COLOR)
else:
drawWireframeOnEImg(e_img, jsonToCor(os.path.join(ATLANTANET_PATH, os.path.splitext(imgPath)[0] + ".json")),
ATLANTANET_COLOR)
drawWireframeOnEImg(e_img, txtToCor(os.path.join(HOHONET_PATH, os.path.splitext(imgPath)[0] + ".layout.txt")),
HOHONET_COLOR)
drawWireframeOnEImg(e_img, jsonToCor(myJsonPath), OUR_COLOR)
show(args.output_path, "all.{:.2f}".format(iou3d))
if args.draw_both or args.draw_independent:
if not args.second:
initFig()
plt.imshow(e_img)
drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
drawWireframeOnEImg(e_img, txtToCor(os.path.join(LAYOUTNET_PATH, os.path.splitext(imgPath)[0] + (
"_aligned_rgb" if isPanoStf else "") + "_cor_id.txt")), LAYOUTNET_COLOR)
show(args.output_path, "lay")
initFig()
plt.imshow(e_img)
drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
drawWireframeOnEImg(e_img, jsonToCor(os.path.join(HORIZONNET_PATH, os.path.splitext(imgPath)[0] + ".json")),
HORIZONNET_COLOR)
show(args.output_path, "hor")
else:
initFig()
plt.imshow(e_img)
drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
drawWireframeOnEImg(e_img, jsonToCor(os.path.join(ATLANTANET_PATH, os.path.splitext(imgPath)[0] + ".json")),
ATLANTANET_COLOR)
show(args.output_path, "atl".format(iou3d))
initFig()
plt.imshow(e_img)
drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
drawWireframeOnEImg(e_img, txtToCor(os.path.join(HOHONET_PATH, os.path.splitext(imgPath)[0] + ".layout.txt")),
HOHONET_COLOR)
show(args.output_path, "hoh".format(iou3d))
initFig()
plt.imshow(e_img)
drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
drawWireframeOnEImg(e_img, jsonToCor(myJsonPath), OUR_COLOR)
show(args.output_path, "our.{:.2f}".format(iou3d))
a = 1
# TODO
# import torch
# PANO_ARR = torch.tensor(PANO_ARR).sort(descending=True)[0]
# STF_ARR = torch.tensor(STF_ARR).sort(descending=True)[0]
# MATTER_ARR = torch.tensor(MATTER_ARR).sort(descending=True)[0]
# for a in [PANO_ARR,STF_ARR,MATTER_ARR]:
# pt = [round(len(a) / 4 * (i+1)) for i in range(3)]
# pt = [a[v] for v in pt]
# print(pt)
| 10,947 | 39.850746 | 130 | py |
DMH-Net | DMH-Net-main/visualization.py | import io
import math
import os
import time
from typing import Dict
import cv2
import numpy as np
try:
import open3d as o3d
except Exception:
pass
import torch
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from e2plabel.e2plabelconvert import VIEW_NAME
from perspective_dataset import PerspectiveDataset
from postprocess.postprocess2 import postProcess, get_vote_mask_c_up_down, generatePred2DMask, _cal_p_pred_emask
DRAW_CUBE_POSITIONS = {
"F": [1, 1],
"R": [1, 2],
"B": [1, 3],
"L": [1, 0],
"U": [0, 1],
"D": [2, 1],
"E": [0, 1, 2, 4],
"3D": [2, 3, 2, 4],
"TEXT": [0, 0]
}
DEFAULT_DRAWTYPE = [["c", "y", "x", "gtlines", "e_rm", "3d", "text"]]
# DEFAULT_DRAWTYPE = [['gtlines_colored', 'border'], 'e_gt', ['c_cl2', 'y_cl2', 'x_cl2']]  # configuration used for the GT visualization (Fig. 1 of the paper)
def clearAxesLines(ax: plt.Axes):
ax.set_xticks([])
ax.set_yticks([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
def getCubeAxes(fig: plt.Figure, view_name):
spec = fig.add_gridspec(3, 4, hspace=0, wspace=0)
posi = DRAW_CUBE_POSITIONS[view_name]
if len(posi) == 2:
ax: plt.Axes = fig.add_subplot(spec[posi[0], posi[1]])
else:
ax: plt.Axes = fig.add_subplot(spec[posi[0]:posi[1], posi[2]:posi[3]])
clearAxesLines(ax)
return ax
def getMaskByType(type, cfg, input, output, img_idx, view_idx):
"""
对每个面画线
"""
p_img = input["p_imgs"][img_idx, view_idx]
prob = None
if type.find("x") == 0:
color_type = 0
if type.find("cl2") != -1: # 论文图1的Line Predictions中所用的颜色:空间中的竖线全为红,水平线则有的蓝有的绿
color_type = getLineColorType(view_idx, color_type)
color = p_img.new_zeros(3)
color[color_type] = 255
if type.find("pk") != -1:
color = torch.tensor([255, 255, 255], dtype=torch.float, device=p_img.device)
prob = output["p_preds_xy"].new_tensor(
PerspectiveDataset.generate_gradual_hough_label(input["peaks"][img_idx][view_idx][0],
input["xLabels"].shape[2], type="nearest_k", base=0.5))
elif type.find("gt") != -1:
prob = input["xLabels"][img_idx, view_idx]
        if prob is not None:  # special case: compute mat on the fly from prob
mat = prob.unsqueeze(0).expand(p_img.shape[1], -1)
        else:  # reuse the mask already computed in the post-processing step
generatePred2DMask(cfg, input, output, img_idx)
mat = output["p_preds_2dmask"][img_idx][view_idx, 0]
elif type.find("y") == 0:
color_type = 1
if type.find("cl2") != -1:
color_type = getLineColorType(view_idx, color_type)
color = p_img.new_zeros(3)
color[color_type] = 255
if type.find("pk") != -1:
color = torch.tensor([255, 255, 255], dtype=torch.float, device=p_img.device)
prob = output["p_preds_xy"].new_tensor(
PerspectiveDataset.generate_gradual_hough_label(input["peaks"][img_idx][view_idx][1],
input["yLabels"].shape[2], type="nearest_k", base=0.5))
elif type.find("gt") != -1:
prob = input["yLabels"][img_idx, view_idx]
        if prob is not None:  # special case: compute mat on the fly from prob
mat = prob.unsqueeze(1).expand(-1, p_img.shape[2])
        else:  # reuse the mask already computed in the post-processing step
generatePred2DMask(cfg, input, output, img_idx)
mat = output["p_preds_2dmask"][img_idx][view_idx, 1]
elif type.find("c") == 0:
def _genProb(probs):
"""
            Flatten a (angle_num, 2) tensor, whose last dim holds the c-up and c-down Hough-domain vectors, into a single vector with c-down concatenated after c-up.
"""
            # return torch.cat([probs[:, i] for i in range(probs.shape[1])])  # the original definition; fully equivalent to the line below but more verbose
return probs.T.reshape(-1)
color_type = 2
if type.find("cl2") != -1:
color_type = getLineColorType(view_idx, color_type)
color = p_img.new_zeros(3)
color[color_type] = 255
if type.find("pk") != -1:
color = torch.tensor([255, 255, 255], dtype=torch.float, device=p_img.device)
prob = torch.cat([
output["p_preds_cud"].new_tensor(
PerspectiveDataset.generate_gradual_hough_label(input["peaks"][img_idx][view_idx][2],
input["cUpLabels"].shape[2],
type="nearest_k", base=0.5)),
output["p_preds_cud"].new_tensor(
PerspectiveDataset.generate_gradual_hough_label(input["peaks"][img_idx][view_idx][3],
input["cDownLabels"].shape[2],
type="nearest_k", base=0.5))])
elif type.find("gt") != -1:
prob = torch.cat(
[input["cUpLabels"][img_idx, view_idx], input["cDownLabels"][img_idx, view_idx]])
elif type.find("raw") != -1:
prob = _genProb(output["raw_cud"][img_idx, view_idx])
        if prob is not None:  # special case: compute mat on the fly from prob
vote_mask_c_up_down = get_vote_mask_c_up_down(cfg, p_img)
# mat = (prob * vote_mask_c_up_down).max(-1).values # OLD
mat = (prob * vote_mask_c_up_down).sum(-1) / vote_mask_c_up_down.sum(-1)
        else:  # reuse the mask already computed in the post-processing step
generatePred2DMask(cfg, input, output, img_idx)
mat = output["p_preds_2dmask"][img_idx][view_idx, 2]
elif type == "gtlines":
color = torch.tensor([255, 255, 255], dtype=torch.float, device=p_img.device)
thickness = int(round(p_img.shape[1] * 0.004))
mat = np.zeros(p_img.shape[1:])
for line in input["lines"][img_idx][view_idx]:
cv2.line(mat, torch.round(line[3:5]).to(torch.int64).numpy(),
torch.round(line[5:7]).to(torch.int64).numpy(), 1.0, thickness=thickness)
mat = p_img.new_tensor(mat)
else:
return None
return mat, color # mat: (h,w)
def getLineColorType(view_idx, line_type):
if VIEW_NAME[view_idx] == "F" or VIEW_NAME[view_idx] == "B":
if line_type == 0:
rgb = 0
elif line_type == 1:
rgb = 2
else:
rgb = 1
elif VIEW_NAME[view_idx] == "L" or VIEW_NAME[view_idx] == "R":
if line_type == 0:
rgb = 0
elif line_type == 1:
rgb = 1
else:
rgb = 2
elif VIEW_NAME[view_idx] == "U" or VIEW_NAME[view_idx] == "D":
if line_type == 0:
rgb = 1
elif line_type == 1:
rgb = 2
else:
rgb = 0
return rgb
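# Channel lookup implemented above (line_type 0/1/2 -> index into RGB):
#   F/B faces: 0 -> R, 1 -> B, otherwise G
#   L/R faces: 0 -> R, 1 -> G, otherwise B
#   U/D faces: 0 -> G, 1 -> B, otherwise R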
def getGTLines2DMasks(cfg, input, output, img_idx):
masks2d = []
for view_idx in range(6):
p_img = input["p_imgs"][img_idx, view_idx]
thickness = int(round(p_img.shape[1] * 0.01))
mat = np.zeros(p_img.shape)
for line in input["lines"][img_idx][view_idx]:
color_type = getLineColorType(view_idx, line[7])
cv2.line(mat[color_type], torch.round(line[3:5]).to(torch.int64).numpy(),
torch.round(line[5:7]).to(torch.int64).numpy(), 1.0, thickness=thickness)
masks2d.append(input["p_imgs"].new_tensor(mat))
masks2d = torch.stack(masks2d)
maskEq = _cal_p_pred_emask(cfg, masks2d, input["p_imgs"].shape[-2:], input["e_img"].shape[-2:])
return masks2d, maskEq
def cvtRGBMatToDrawingNdArray(input):
"""
    :param input: tensor (3, h, w), float, range 0-1
    :return: ndarray (h, w, 4), uint8, range 0-255
"""
input4 = torch.cat([input, torch.clamp(input.max(0)[0], 0.0, 1.0).unsqueeze(0)], 0)
return torch.clamp(torch.round(input4 * 255), 0.0, 255.0).permute(1, 2, 0).cpu().to(torch.uint8).numpy()
def drawEqualRectCorners(cfg, ax, type, input, output, img_idx, gt_cor_id, pred_cor_id):
e_img = input["e_img"][img_idx].permute(1, 2, 0).cpu().numpy()
ax.imshow(e_img)
if type.find("r") != -1:
cor = gt_cor_id.cpu().numpy()
ax.scatter(cor[:, 0], cor[:, 1], c="red", s=10)
if pred_cor_id is not None:
cor = pred_cor_id.cpu().numpy()
ax.scatter(cor[:, 0], cor[:, 1], c="green", s=10)
if type.find("m") != -1:
for one_draw_idx in range(3):
generatePred2DMask(cfg, input, output, img_idx)
mat = output["p_preds_emask"][img_idx][one_draw_idx]
color = mat.new_zeros(3)
color[one_draw_idx] = 255
mask_img = torch.cat([color.repeat(*mat.shape[0:2], 1), mat.unsqueeze(-1) * 255], 2)
mask_img = torch.round(mask_img).to(torch.uint8)
ax.imshow(mask_img.cpu().numpy())
if type.find("gt") != -1:
_, emask = getGTLines2DMasks(cfg, input, output, img_idx)
drawArray = cvtRGBMatToDrawingNdArray(emask)
ax.imshow(drawArray)
if type.find("w") != -1:
drawWireframeOnEImg(ax, e_img, gt_cor_id, (0.0, 1.0, 0.0))
drawWireframeOnEImg(ax, e_img, pred_cor_id, (1.0, 0.0, 0.0))
def o3dRunVis(vis):
vis.update_geometry()
vis.update_renderer()
vis.poll_events()
vis.run()
def o3dDrawLines(vis, lines_results, lwh, color=None):
points, lines, colors = cvtLinesResultsForDraw(lines_results, lwh, color)
line_pcd = o3d.LineSet()
line_pcd.lines = o3d.Vector2iVector(lines)
line_pcd.colors = o3d.Vector3dVector(colors)
line_pcd.points = o3d.Vector3dVector(points)
vis.add_geometry(line_pcd)
def pyplotGetCameraPos(gt_lwh):
return (2 * gt_lwh[2] - gt_lwh[3]).item()
def pyplotDrawLines(ax, cameraPos, lines_results, lwh, color=None):
points, lines, colors = cvtLinesResultsForDraw(lines_results, lwh, color)
points2d = points - np.array([0.0, cameraPos, 0.0])
points2d /= points2d[:, 1:2]
points2d = points2d[:, [0, 2]]
ax.set_facecolor("black")
for i, line in enumerate(lines):
p = points2d[line]
ax.plot(p[:, 0], p[:, 1], c=colors[i] if colors is not None else None, linewidth=1)
def drawWireframeOnEImg(ax, e_img, cor, color):
from visualization_from_json import wireframeGetMaskImg
ax.imshow(wireframeGetMaskImg(e_img, cor, color).cpu().numpy())
def cvtLinesResultsForDraw(lines_results, lwh, color=None):
points = []
lines = []
colors = []
    for line in lines_results:
        line_color = color
        if line_color is None:  # pick a per-line color instead of latching the first computed one
# if 0 <= line[6] <= 1:
# color = [1.0, 0.0, 0.0]
# elif 2 <= line[6] <= 3:
# color = [0.0, 1.0, 0.0]
# elif 4 <= line[6] <= 5:
# color = [0.0, 0.0, 1.0]
# elif 6 <= line[6] <= 7:
# color = [0.0, 0.0, 1.0]
            if line[2] == 1:
                line_color = [0.0, 1.0, 1.0]
            else:
                line_color = [0.0, 1.0, 0.0]
if line[1] == 0:
points.append([lwh[0], line[4], line[5]])
points.append([lwh[1], line[4], line[5]])
elif line[1] == 1:
points.append([line[3], lwh[2], line[5]])
points.append([line[3], lwh[3], line[5]])
elif line[1] == 2:
points.append([line[3], line[4], lwh[4]])
points.append([line[3], line[4], lwh[5]])
else:
assert False
lines.append([len(points) - 2, len(points) - 1])
        colors.append(line_color)
return np.array(points), np.array(lines), colors
def o3dInitVis():
"""
    In Open3D's built-in coordinate frame, red is the x axis, green is the y axis, and blue is the z axis!
"""
vis = o3d.visualization.VisualizerWithKeyCallback()
def save_view_point(vis, filename):
param = vis.get_view_control().convert_to_pinhole_camera_parameters()
o3d.io.write_pinhole_camera_parameters(filename, param)
vis.register_key_callback(ord("S"), lambda vis: save_view_point(vis, "./view-" + str(int(time.time())) + ".json"))
vis.create_window(width=1386, height=752)
if os.path.exists("./view-1617968465.json"):
vis.get_view_control().convert_from_pinhole_camera_parameters(
o3d.read_pinhole_camera_parameters("./view-1617968465.json"))
renderOption: o3d.RenderOption = vis.get_render_option()
renderOption.background_color = np.array([0, 0, 0], dtype=np.float32)
renderOption.show_coordinate_frame = True
renderOption.point_size = 0.1
return vis
def makeLwhLines(lwh):
result = [
[0, 0, 0, math.nan, lwh[2], lwh[4], 0, 0],
[0, 0, 0, math.nan, lwh[2], lwh[5], 0, 0],
[0, 0, 0, math.nan, lwh[3], lwh[4], 0, 0],
[0, 0, 0, math.nan, lwh[3], lwh[5], 0, 0],
[0, 1, 0, lwh[0], math.nan, lwh[4], 0, 0],
[0, 1, 0, lwh[0], math.nan, lwh[5], 0, 0],
[0, 1, 0, lwh[1], math.nan, lwh[4], 0, 0],
[0, 1, 0, lwh[1], math.nan, lwh[5], 0, 0],
[0, 2, 0, lwh[0], lwh[2], math.nan, 0, 0],
[0, 2, 0, lwh[0], lwh[3], math.nan, 0, 0],
[0, 2, 0, lwh[1], lwh[2], math.nan, 0, 0],
[0, 2, 0, lwh[1], lwh[3], math.nan, 0, 0],
]
return lwh.new_tensor(result)
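# Each row above encodes one box edge as [_, axis, _, x, y, z, _, _]: the
# coordinate along `axis` (0=x, 1=y, 2=z) is NaN and is filled in from the lwh
# bounds at draw time (see cvtLinesResultsForDraw).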
def visualize(cfg, input, output, drawtypes=None, show=False, dpi=None) -> Dict[str, np.ndarray]:
postResults = [postProcess(cfg, input, output, img_idx) for img_idx in range(input["p_imgs"].shape[0])]
return visualizeWithPostResults(cfg, input, output, postResults, drawtypes, show, dpi)
DRAW_3D_PREDBOX_COLOR = "blue"
DRAW_3D_GTBOX_COLOR = "white"
def visualizeWithPostResults(cfg, input, output, postResults: list, drawtypes=None, show=False, dpi=None) -> Dict[
str, np.ndarray]:
"""
    Visualize the results.
    :param input: the input batch produced by the dataset
    :param output: the model output
    :param postResults: list containing, for every image in the input, the result returned by postProcess
    :return: dict whose keys are strings and whose values are (h, w, 3) ndarrays holding the images' RGB matrices
"""
if drawtypes is None:
drawtypes = DEFAULT_DRAWTYPE
result = {}
with torch.no_grad():
for img_idx in range(input["p_imgs"].shape[0]):
(gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metric = postResults[img_idx]
for draw_idx, one_fig_types in enumerate(drawtypes):
fig: Figure = plt.figure(dpi=dpi)
                fig.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)  # remove all surrounding margins
if isinstance(one_fig_types, str) and one_fig_types.find("e") == 0:
                    # draw a single equirectangular panorama
ax = plt.gca()
clearAxesLines(ax)
drawEqualRectCorners(cfg, ax, one_fig_types, input, output, img_idx, gt_cor_id, pred_cor_id)
elif isinstance(one_fig_types, str) and one_fig_types.find("3d") == 0:
ax = plt.gca()
clearAxesLines(ax)
cameraPos = pyplotGetCameraPos(gt_lwh)
if one_fig_types.find("predbox") != -1:
pyplotDrawLines(ax, cameraPos, makeLwhLines(pred_lwh), pred_lwh, DRAW_3D_PREDBOX_COLOR)
if one_fig_types.find("gtbox") != -1:
pyplotDrawLines(ax, cameraPos, makeLwhLines(gt_lwh), gt_lwh, DRAW_3D_GTBOX_COLOR)
pyplotDrawLines(ax, cameraPos, gt_lines, gt_lwh, "red")
pyplotDrawLines(ax, cameraPos, pred_lines, pred_lwh, "green")
elif isinstance(one_fig_types, list):
                    # draw the top-left, bottom-right and top-right panels
for type in one_fig_types:
if type.find("e") == 0:
ax = getCubeAxes(fig, "E")
drawEqualRectCorners(cfg, ax, type, input, output, img_idx, gt_cor_id, pred_cor_id)
elif type.find("3d") == 0 and gt_lwh is not None and pred_lwh is not None:
ax = getCubeAxes(fig, "3D")
cameraPos = pyplotGetCameraPos(gt_lwh)
if type.find("predbox") != -1:
pyplotDrawLines(ax, cameraPos, makeLwhLines(pred_lwh), pred_lwh, DRAW_3D_PREDBOX_COLOR)
if type.find("gtbox") != -1:
pyplotDrawLines(ax, cameraPos, makeLwhLines(gt_lwh), gt_lwh, DRAW_3D_GTBOX_COLOR)
pyplotDrawLines(ax, cameraPos, gt_lines, gt_lwh, "red")
if len(pred_lines) > 0:
pyplotDrawLines(ax, cameraPos, pred_lines, pred_lwh, "green")
elif type == "text":
ax = getCubeAxes(fig, "TEXT")
toWrite = ""
if "CE" in metric and "PE" in metric:
toWrite = "CE:{:.3f} PE:{:.3f}\n" \
"3DIoU:{:.2f}\n" \
" ".format(
# "gt:{:s}\n {:s}\n" \
# "pr:{:s}\n {:s}\n".format(
metric["CE"], metric["PE"], metric["3DIoU"],
",".join(["{:.2f}".format(v.item()) for v in gt_lwh[0:3]]),
",".join(["{:.2f}".format(v.item()) for v in gt_lwh[3:6]]),
",".join(["{:.2f}".format(v.item()) for v in pred_lwh[0:3]]),
",".join(["{:.2f}".format(v.item()) for v in pred_lwh[3:6]]), )
elif "rmse" in metric and "delta_1" in metric:
toWrite = "3DIoU:{:.2f}\n" \
"2DIoU:{:.2f}\n" \
"delta_1:{:.3f}\n" \
" ".format(
# "rmse:{:.4f}\n".format(
metric["3DIoU"], metric["2DIoU"], metric["delta_1"], metric["rmse"],
)
ax.text(0, 0, toWrite)
# if "nz" in metric:
# ax.text(0, 0.85, "nz:" + metric["nz"], color="blue")
# if "noline" in metric:
# ax.text(0, 0.75, "nl:" + metric["noline"], color="red")
mask_buffer = {}
gtlines_colored_masks2d = None
                    # draw each cube face
for view_idx, view_name in enumerate(VIEW_NAME):
ax = getCubeAxes(fig, view_name)
p_img = input["p_imgs"][img_idx, view_idx]
if one_fig_types[0] != "canny":
ax.imshow(p_img.permute(1, 2, 0).cpu().numpy())
one_fig_types_2 = one_fig_types
else:
ax.imshow(np.expand_dims(output["canny_image"][img_idx][view_idx], 2).repeat(3, 2))
one_fig_types_2 = one_fig_types[1:]
mask_buffer[view_idx] = {}
                        # draw every requested line type
for type in one_fig_types_2:
t = getMaskByType(type, cfg, input, output, img_idx, view_idx)
if t is not None:
mat, color = t
mask_buffer[view_idx][type] = mat
mask_img = torch.cat([color.repeat(*mat.shape[0:2], 1), mat.unsqueeze(-1) * 255], 2)
mask_img = torch.round(mask_img).to(torch.uint8)
ax.imshow(mask_img.cpu().numpy())
if type == "gtlines_colored":
if gtlines_colored_masks2d is None:
gtlines_colored_masks2d, _ = getGTLines2DMasks(cfg, input, output, img_idx)
drawArray = cvtRGBMatToDrawingNdArray(gtlines_colored_masks2d[view_idx])
ax.imshow(drawArray)
if type == "border":
                                # draw a white border frame
BORDER_WIDTH = 2
img_hw = p_img.shape[-2:]
white_border_mask = np.ones((*img_hw, 4), dtype=np.uint8) * 255
white_border_mask[BORDER_WIDTH:img_hw[0] - BORDER_WIDTH,
BORDER_WIDTH:img_hw[1] - BORDER_WIDTH, 3] = 0
ax.imshow(white_border_mask)
if type.find("hough_line") == 0:
ax.set_xlim(0, 512)
ax.set_ylim(0, 512)
ax.invert_yaxis()
liness = output["hough_lines"][img_idx][view_idx]
for ii, lines in enumerate(liness):
for jj, line in enumerate(lines):
color = "b"
if jj == 0:
if type.find("red") != -1: continue
else:
if type.find("first_only") != -1:
continue
ax.plot([line[0], line[2]], [line[1], line[3]], color)
if type.find("red") != -1:
for ii, lines in enumerate(liness):
if len(lines) > 0:
line = lines[0]
color = "r"
ax.plot([line[0], line[2]], [line[1], line[3]], color)
                # grab the rendered figure as an image
buf = io.BytesIO()
fig.savefig(buf, format="jpg")
buf.seek(0)
                img = Image.open(buf)  # decode the buffered figure with PIL
img = np.asarray(img)
buf.close()
if show:
fig.show()
else:
result["{:s}--{:s}".format(input["filename"][img_idx], str(draw_idx))] = img
plt.close()
return result
| 22,450 | 43.021569 | 119 | py |
DMH-Net | DMH-Net-main/verify_vote.py | import argparse
import torch
from torch.utils.data import DataLoader
from tqdm import trange
from config import cfg, cfg_from_yaml_file, cfg_from_list
from e2plabel.e2plabelconvert import VIEW_NAME
from perspective_dataset import PerspectiveDataset
from visualization import getMaskByType, visualize
from postprocess.postprocess2 import get_vote_mask_c_up_down
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cfg_file', type=str, required=True, help='specify the config for training')
parser.add_argument('--visu_count', default=2, type=int, help='visualize how many batches')
parser.add_argument('--batch_size', default=1, type=int, help='mini-batch size')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
device = torch.device('cuda')
dataset_valid = PerspectiveDataset(cfg, "test")
loader_valid = DataLoader(dataset_valid,
args.batch_size,
collate_fn=dataset_valid.collate,
shuffle=False,
drop_last=False,
num_workers=0,
pin_memory=True)
    # dataset variant that generates 'nearest_only'-style Hough labels
dataset_nearest_only = PerspectiveDataset(cfg, "test")
dataset_nearest_only.hough_label_gradual_type = "nearest_only"
iterator_valid = iter(loader_valid)
for valid_idx in trange(args.visu_count, desc='Verify CLine Vote', position=2):
input = next(iterator_valid)
def _core(input):
with torch.no_grad():
for k in input:
if isinstance(input[k], torch.Tensor):
input[k] = input[k].to(device)
matss = []
for img_idx in range(input["p_imgs"].shape[0]):
mats = []
for view_idx, view_name in enumerate(VIEW_NAME):
mat, _ = getMaskByType("gtc", cfg, input, None, img_idx, view_idx)
mats.append(mat)
matss.append(mats)
gtc_map = torch.stack([torch.stack(mats, dim=0) for mats in matss], dim=0)
vmask = get_vote_mask_c_up_down(cfg, input["p_imgs"])
vmu, vmd = vmask[:, :, 0:vmask.shape[-1] // 2], vmask[:, :, vmask.shape[-1] // 2:]
hough_c_up_vote = torch.matmul(gtc_map.reshape(*gtc_map.shape[0:2], -1), vmu.reshape(-1, vmu.shape[-1]))
hough_c_down_vote = torch.matmul(gtc_map.reshape(*gtc_map.shape[0:2], -1),
vmd.reshape(-1, vmd.shape[-1]))
hough_vote_res = torch.stack([hough_c_up_vote, hough_c_down_vote], dim=3)
hough_vote_res = hough_vote_res / hough_vote_res.max()
gtc_output = {
"raw_cud": hough_vote_res
}
visualize(cfg, input, gtc_output, drawtypes=[["c gt"], ["c raw"]], show=True, dpi=600)
_core(input)
_core(loader_valid.collate_fn([dataset_nearest_only.getItem(f) for f in input["filename"]]))
| 3,433 | 46.041096 | 120 | py |
DMH-Net | DMH-Net-main/model.py | import math
import types
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from drn import drn_d_22, drn_d_38, drn_d_54
from e2plabel.e2plabelconvert import VIEW_NAME
from layers import FusionHoughStage, PerspectiveE2PP2E, HoughNewUpSampler
ENCODER_RESNET = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'resnet_official_34']
ENCODER_DENSENET = ['densenet121', 'densenet169', 'densenet161', 'densenet201']
ENCODER_HOUGH = ['unet18', 'vgg16', 'drn38', 'drn22', 'drn54']
def OfficialResnetWrapper(model):
    # copied from resnet.py in the torchvision 0.10.0 source
def _forward_impl(self, x: torch.Tensor) -> Tuple[torch.Tensor, ...]: # Tuple[torch.Tensor * 5]
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
c1 = self.relu(x)
x = self.maxpool(c1)
c2 = self.layer1(x)
c3 = self.layer2(c2)
c4 = self.layer3(c3)
c5 = self.layer4(c4)
        # x = self.avgpool(x)  # classification features are not needed, so skip GAP and the fully-connected head
# x = torch.flatten(x, 1)
# x = self.fc(x)
return c1, c2, c3, c4, c5
model._forward_impl = types.MethodType(_forward_impl, model)
return model
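# Usage sketch (imports as in makeFeatureExtractor below): the wrapped backbone
# returns the five pyramid feature maps instead of class logits:
#   backbone = OfficialResnetWrapper(resnet34(pretrained=True))
#   c1, c2, c3, c4, c5 = backbone(torch.randn(1, 3, 512, 512))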
class DMHNet(nn.Module):
x_mean = torch.FloatTensor(np.array([0.485, 0.456, 0.406])[None, :, None, None])
x_std = torch.FloatTensor(np.array([0.229, 0.224, 0.225])[None, :, None, None])
def __init__(self, cfg, backbone, use_rnn):
super(DMHNet, self).__init__()
self.cfg = cfg
self.backbone = backbone
        self.use_rnn = use_rnn  # apparently an unused parameter
self.out_scale = 4
self.step_cols = 1
self.hidden_size = 256
self.fov = 160
# Encoder
def makeFeatureExtractor():
if backbone == "resnet_official_34":
from torchvision.models.resnet import resnet34
return OfficialResnetWrapper(resnet34(pretrained=True))
if backbone == "resnet_official_50":
from torchvision.models.resnet import resnet50
return OfficialResnetWrapper(resnet50(pretrained=True))
if backbone == "resnet_official_18":
from torchvision.models.resnet import resnet18
return OfficialResnetWrapper(resnet18(pretrained=True))
if backbone == "resnet_official_101":
from torchvision.models.resnet import resnet101
return OfficialResnetWrapper(resnet101(pretrained=True))
elif backbone.startswith('drn22'):
return drn_d_22(pretrained=True, out_middle=True)
elif backbone.startswith('drn38'):
return drn_d_38(pretrained=True, out_middle=True)
elif backbone.startswith('drn54'):
return drn_d_54(pretrained=True, out_middle=True)
else:
raise NotImplementedError()
self.feature_extractor = [makeFeatureExtractor()]
        self._feature_extractor_ref = [0] * 7  # the 7th entry is the feature_extractor used for the panorama
if self.cfg.MODEL.BACKBONE.PRIVATE_UPDOWN:
self.feature_extractor.append(makeFeatureExtractor())
self._feature_extractor_ref[4:6] = [len(self.feature_extractor) - 1] * 2
if self.cfg.MODEL.BACKBONE.PRIVATE_UP:
self.feature_extractor.append(makeFeatureExtractor())
self._feature_extractor_ref[5] = len(self.feature_extractor) - 1
self.feature_extractor = nn.ModuleList(self.feature_extractor)
# Input shape
H, W = 512, 1024
# Inference channels number from each block of the encoder
with torch.no_grad():
dummy = torch.zeros(1, 3, 512, 512)
if backbone.startswith('drn'):
net_out = self.feature_extractor[0](dummy)[1]
else:
net_out = self.feature_extractor[0](dummy)
c0, c1, c2, c3, c4 = [b.shape[1] for b in net_out]
size0, size1, size2, size3, size4 = [b.shape[2] for b in net_out]
self.c0, self.c1, self.c2, self.c3, self.c4 = c0, c1, c2, c3, c4
# print("c0, c1, c2, c3, c4", c0, c1, c2, c3, c4)
c_last = int((c1 * 8 + c2 * 4 + c3 * 4 + c4 * 4) / self.out_scale)
self.x_mean.requires_grad = False
self.x_std.requires_grad = False
def make5HoughModules():
return nn.ModuleList([
PerspectiveE2PP2E(self.cfg, size0, size0, size0, self.fov, c0, 1),
PerspectiveE2PP2E(self.cfg, size1, size1, size1, self.fov, c1, 1),
                # TODO: for feature maps downsampled to h=w=64, can the number of Hough angle votes still be 180?
PerspectiveE2PP2E(self.cfg, size2, size2, size2, self.fov, c2, 1,
hough_angles_num=90),
PerspectiveE2PP2E(self.cfg, size3, size3, size3, self.fov, c3, 1,
hough_angles_num=90),
PerspectiveE2PP2E(self.cfg, size4, size4, size4, self.fov, c4, 1,
hough_angles_num=90),
])
self.hough = [make5HoughModules(), make5HoughModules(), make5HoughModules()]
self._hough_ref = [0, 0, 0, 0, 1, 2]
self.hough = nn.ModuleList(self.hough)
def make2FusionModules():
factor = self.cfg.MODEL.get("CONV1_CHANNEL_FACTOR", 2)
return nn.ModuleList([
FusionHoughStage(self.cfg, "xy", 3, c0 // factor, c1 // factor, c2 // factor, c3 // factor,
c4 // factor,
upsample_rate=[512 // size0, 512 // size1, 512 // size2, 512 // size3,
                                             512 // size4, ]),  # fusion of the xy Hough features
FusionHoughStage(self.cfg, "cupdown", 3, c0 // factor, c1 // factor, c2 // factor, c3 // factor,
c4 // factor,
upsample_rate=[512 // size0, 512 // size1, 512 // size2, 512 // size3, 512 // size4, ],
upsampler_class=HoughNewUpSampler),
                # fusion of the cupdown Hough features
])
self.fusion_stage = [make2FusionModules(), make2FusionModules(), make2FusionModules()]
self._fusion_stage_ref = [0, 0, 0, 0, 1, 2]
self.fusion_stage = nn.ModuleList(self.fusion_stage)
def _input_image_normalize(self, x):
if self.x_mean.device != x.device:
self.x_mean = self.x_mean.to(x.device)
self.x_std = self.x_std.to(x.device)
return (x[:, :3] - self.x_mean) / self.x_std
def _get_feature_extractor(self, i):
return self.feature_extractor[self._feature_extractor_ref[i]]
def _get_hough(self, i):
return self.hough[self._hough_ref[i]]
def _get_fusion_stage(self, i):
return self.fusion_stage[self._fusion_stage_ref[i]]
def forward(self, input):
results_dict = {}
p_xys = []
p_cuds = []
for view_idx in range(input["p_imgs"].shape[1]): # 对所有sample的每个view做循环
p_img = self._input_image_normalize(input["p_imgs"][:, view_idx])
p_conv_list = self._get_feature_extractor(view_idx)(p_img)
if len(p_conv_list) == 2: p_conv_list = p_conv_list[1]
p_hough_bin_feat = [hough(onefeat) for onefeat, hough in zip(p_conv_list, self._get_hough(view_idx))]
fusioner = self._get_fusion_stage(view_idx)
# Decoder for xy peaks
p_hough_feat_xy = [f[0] for f in p_hough_bin_feat]
p_xy = fusioner[0](p_hough_feat_xy)
p_xys.append(p_xy)
            # Decoder for center-line peaks
p_hough_feat_cud = [f[1] for f in p_hough_bin_feat]
p_cud = fusioner[1](p_hough_feat_cud)
p_cuds.append(p_cud)
results_dict.update({
"p_preds_xy": torch.cat(p_xys, 1),
"p_preds_cud": torch.cat(p_cuds, 1),
})
if self.cfg.MODEL.get("NO_CLINE_PRED"):
results_dict["p_preds_cud"] = torch.ones_like(results_dict["p_preds_cud"]) * -math.inf
if self.cfg.MODEL.get("NO_HLINE_PRED"):
results_dict["p_preds_xy"][:, :, :, 1] = torch.ones_like(results_dict["p_preds_xy"][:, :, :, 1]) * -math.inf
if self.cfg.MODEL.get("NO_VLINE_PRED"):
results_dict["p_preds_xy"][:, :, :, 0] = torch.ones_like(results_dict["p_preds_xy"][:, :, :, 0]) * -math.inf
losses = self.calculate_loss(input, results_dict)
return losses, results_dict
def calculate_loss(self, input, output):
device = input["e_img"].device
xLabels = input["xLabels"].to(device)
yLabels = input["yLabels"].to(device)
cUpLabels = input["cUpLabels"].to(device)
cDownLabels = input["cDownLabels"].to(device)
losses = {
"total": 0.0
}
        # Extra losses: for debugging only, not added to the total!
losses["extra_xLabels"] = 0.0
losses["extra_yLabels"] = 0.0
losses["extra_cUpLabels"] = 0.0
losses["extra_cDownLabels"] = 0.0
        # Six perspective losses
for view_idx in range(output["p_preds_xy"].shape[1]):
one_loss_x = []
one_loss_y = []
one_loss_c_up = []
one_loss_c_down = []
for img_idx in range(output["p_preds_xy"].shape[0]):
the_onepred_xy = output["p_preds_xy"][img_idx, view_idx]
if not self.cfg.MODEL.get("NO_VLINE_PRED"):
the_oneloss_x = F.binary_cross_entropy_with_logits(the_onepred_xy[:, 0], xLabels[img_idx, view_idx])
one_loss_x.append(the_oneloss_x)
if not self.cfg.MODEL.get("NO_HLINE_PRED"):
the_oneloss_y = F.binary_cross_entropy_with_logits(the_onepred_xy[:, 1], yLabels[img_idx, view_idx])
one_loss_y.append(the_oneloss_y)
if not self.cfg.MODEL.get("NO_CLINE_PRED"):
the_onepred_cud = output["p_preds_cud"][img_idx, view_idx]
the_oneloss_c_up = F.binary_cross_entropy_with_logits(the_onepred_cud[:, 0],
cUpLabels[img_idx, view_idx])
one_loss_c_up.append(the_oneloss_c_up)
the_oneloss_c_down = F.binary_cross_entropy_with_logits(the_onepred_cud[:, 1],
cDownLabels[img_idx, view_idx])
one_loss_c_down.append(the_oneloss_c_down)
one_loss_x = (torch.stack(one_loss_x) if len(one_loss_x) > 0 else output["p_preds_xy"].new_tensor([])) \
.sum() / output["p_preds_xy"].shape[0] # 求和再除以batchsize,而不是求平均,以保证每个图片对loss的贡献相同
one_loss_y = (torch.stack(one_loss_y) if len(one_loss_y) > 0 else output["p_preds_xy"].new_tensor([])) \
.sum() / output["p_preds_xy"].shape[0]
one_loss_c_up = (torch.stack(one_loss_c_up) if len(one_loss_c_up) > 0 else output["p_preds_cud"].new_tensor(
[])).sum() / output["p_preds_xy"].shape[0]
one_loss_c_down = (torch.stack(one_loss_c_down) if len(one_loss_c_down) > 0 else output[
"p_preds_cud"].new_tensor([])).sum() / output["p_preds_xy"].shape[0]
with torch.no_grad():
losses["extra_xLabels"] += one_loss_x
losses["extra_yLabels"] += one_loss_y
losses["extra_cUpLabels"] += one_loss_c_up
losses["extra_cDownLabels"] += one_loss_c_down
one_loss = one_loss_x + one_loss_y + one_loss_c_up + one_loss_c_down
losses["p_" + VIEW_NAME[view_idx]] = one_loss
losses["total"] += self.cfg.MODEL.get("LOSS", {}).get("ALPHA_PERSPECTIVE", 1.0) * one_loss
return losses
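# Minimal usage sketch (illustrative; cfg is the EasyDict loaded from a yaml config,
# and the batch dict is the one produced by PerspectiveDataset.collate):
#
#     net = DMHNet(cfg, backbone="drn38", use_rnn=False).to(device)
#     losses, results = net(batch)   # batch needs "p_imgs", "e_img", "xLabels",
#                                    # "yLabels", "cUpLabels", "cDownLabels"
#     losses["total"].backward()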
| 11,875 | 45.031008 | 120 | py |
DMH-Net | DMH-Net-main/drn.py | import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
BatchNorm = nn.BatchNorm2d
# __all__ = ['DRN', 'drn26', 'drn42', 'drn58']
webroot = 'http://dl.yf.io/drn/'
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'drn-c-26': webroot + 'drn_c_26-ddedf421.pth',
'drn-c-42': webroot + 'drn_c_42-9d336e8c.pth',
'drn-c-58': webroot + 'drn_c_58-0a53a92c.pth',
'drn-d-22': webroot + 'drn_d_22-4bd2f8ea.pth',
'drn-d-38': webroot + 'drn_d_38-eebb45f0.pth',
'drn-d-54': webroot + 'drn_d_54-0e0534ff.pth',
'drn-d-105': webroot + 'drn_d_105-12b40979.pth'
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=(1, 1), residual=True):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride,
padding=dilation[0], dilation=dilation[0])
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes,
padding=dilation[1], dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.downsample = downsample
self.stride = stride
self.residual = residual
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.residual:
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=(1, 1), residual=True):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=dilation[1], bias=False,
dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class DRN(nn.Module):
def __init__(self, block, layers, num_classes=1000,
channels=(16, 32, 64, 128, 256, 512, 512, 512),
out_map=False, out_middle=False, pool_size=28, arch='D'):
super(DRN, self).__init__()
self.inplanes = channels[0]
self.out_map = out_map
self.out_dim = channels[-1]
self.out_middle = out_middle
self.arch = arch
if arch == 'C':
self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False)
self.bn1 = BatchNorm(channels[0])
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(
BasicBlock, channels[0], layers[0], stride=1)
self.layer2 = self._make_layer(
BasicBlock, channels[1], layers[1], stride=2)
elif arch == 'D':
self.layer0 = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3,
bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True)
)
self.layer1 = self._make_conv_layers(
channels[0], layers[0], stride=1)
self.layer2 = self._make_conv_layers(
channels[1], layers[1], stride=2)
self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)
self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)
self.layer5 = self._make_layer(block, channels[4], layers[4],
dilation=2, new_level=False)
self.layer6 = None if layers[5] == 0 else \
self._make_layer(block, channels[5], layers[5], dilation=4,
new_level=False)
if arch == 'C':
self.layer7 = None if layers[6] == 0 else \
self._make_layer(BasicBlock, channels[6], layers[6], dilation=2,
new_level=False, residual=False)
self.layer8 = None if layers[7] == 0 else \
self._make_layer(BasicBlock, channels[7], layers[7], dilation=1,
new_level=False, residual=False)
elif arch == 'D':
self.layer7 = None if layers[6] == 0 else \
self._make_conv_layers(channels[6], layers[6], dilation=2)
self.layer8 = None if layers[7] == 0 else \
self._make_conv_layers(channels[7], layers[7], dilation=1)
if num_classes > 0:
self.avgpool = nn.AvgPool2d(pool_size)
self.fc = nn.Conv2d(self.out_dim, num_classes, kernel_size=1,
stride=1, padding=0, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
new_level=True, residual=True):
assert dilation == 1 or dilation % 2 == 0
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = list()
layers.append(block(
self.inplanes, planes, stride, downsample,
dilation=(1, 1) if dilation == 1 else (
dilation // 2 if new_level else dilation, dilation),
residual=residual))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, residual=residual,
dilation=(dilation, dilation)))
return nn.Sequential(*layers)
def _make_conv_layers(self, channels, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(self.inplanes, channels, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(channels),
nn.ReLU(inplace=True)])
self.inplanes = channels
return nn.Sequential(*modules)
def forward(self, x):
y = list()
if self.arch == 'C':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
elif self.arch == 'D':
x = self.layer0(x)
x = self.layer1(x)
# y.append(x)
x = self.layer2(x)
y.append(x)
x = self.layer3(x)
y.append(x)
x = self.layer4(x)
y.append(x)
x = self.layer5(x)
y.append(x)
if self.layer6 is not None:
x = self.layer6(x)
y.append(x)
if self.layer7 is not None:
x = self.layer7(x)
# y.append(x)
if self.layer8 is not None:
x = self.layer8(x)
# y.append(x)
if self.out_map:
x = self.fc(x)
else:
x = x
# x = self.avgpool(x)
# x = self.fc(x)
# x = x.view(x.size(0), -1)
if self.out_middle:
return x, y
else:
return x
class DRN_A(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(DRN_A, self).__init__()
self.out_dim = 512 * block.expansion
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilation=4)
self.avgpool = nn.AvgPool2d(28, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,
dilation=(dilation, dilation)))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def drn_a_50(pretrained=False, **kwargs):
model = DRN_A(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def drn_c_26(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='C', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-c-26']))
return model
def drn_c_42(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-c-42']))
return model
def drn_c_58(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-c-58']))
return model
def drn_d_22(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-22']))
return model
def drn_d_24(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-24']))
return model
def drn_d_38(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-38']))
return model
def drn_d_40(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-40']))
return model
def drn_d_54(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-54']))
return model
def drn_d_56(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-56']))
return model
def drn_d_105(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-105']))
return model
def drn_d_107(pretrained=False, **kwargs):
model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 2, 2], arch='D', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-d-107']))
return model | 14,207 | 33.236145 | 88 | py |
DMH-Net | DMH-Net-main/layers.py | import math
import torch
import torch.nn as nn
class PerspectiveE2PP2E(nn.Module):
    def __init__(self, cfg, input_h, input_w, pers_h, fov, input_feat, output_feat, hough_angles_num=180,
                 hough_clines_tole=1.0):
super(PerspectiveE2PP2E, self).__init__()
self.cfg = cfg
        self.hough_clines_tole = hough_clines_tole
self.hough_angles_num = hough_angles_num
self.input_h = input_h
self.input_w = input_w
self.pers_h = pers_h
self.fov = fov
self.input_feat = input_feat
dim = input_feat // self.cfg.MODEL.get("CONV1_CHANNEL_FACTOR", 2)
        # build conv1
self.conv1_x = nn.Sequential(nn.Conv2d(input_feat, dim, kernel_size=(1, 1), padding=(0, 0)),
nn.BatchNorm2d(dim),
nn.ReLU()
)
self.conv1_cup = nn.Sequential(nn.Conv2d(input_feat, dim, kernel_size=(1, 1), padding=(0, 0)),
nn.BatchNorm2d(dim),
nn.ReLU()
)
self.conv1_cdown = nn.Sequential(nn.Conv2d(input_feat, dim, kernel_size=(1, 1), padding=(0, 0)),
nn.BatchNorm2d(dim),
nn.ReLU()
)
self.conv1_y = nn.Sequential(nn.Conv2d(input_feat, dim, kernel_size=(1, 1), padding=(0, 0)),
nn.BatchNorm2d(dim),
nn.ReLU()
)
        # build conv2
self.conv2_x = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(dim),
nn.ReLU(),
nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(dim),
nn.ReLU()
)
self.conv2_cup = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(dim),
nn.ReLU(),
nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(dim),
nn.ReLU()
)
self.conv2_cdown = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(dim),
nn.ReLU(),
nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(dim),
nn.ReLU()
)
self.conv2_y = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(dim),
nn.ReLU(),
nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(dim),
nn.ReLU()
)
        # Matrices used for Hough voting over lines passing through the image center
self.vote_mask_c_up = None
self.vote_mask_c_down = None
def makeVoteMask(self, img_size, device):
vote_mask_c_up, vote_mask_c_down = self.makeVoteMaskStatic(self.cfg.MODEL.HOUGH.CLINE_TYPE, img_size, device,
                                                                    self.hough_angles_num, self.hough_clines_tole)
        # Avoid GPU memory blow-up: convert to an (h*w, num_angles) matrix and matrix-multiply with the features
self.vote_mask_c_up = vote_mask_c_up.reshape(-1, vote_mask_c_up.shape[-1])
self.vote_mask_c_down = vote_mask_c_down.reshape(-1, vote_mask_c_down.shape[-1])
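        # Shape walkthrough (illustrative): in forward() a feature map of shape (B, C, h, w)
        # is flattened to (B, C, h*w) and matrix-multiplied with vote_mask_c_up of shape
        # (h*w, A), where A = w + 2*(h//2) - 2 is the number of center-line angle bins,
        # producing center-line votes of shape (B, C, A).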
@staticmethod
    def makeVoteMaskStatic(type: str, img_size, device, hough_angles_num=180, hough_clines_tole=1.0):
if type == "NEW":
def scatterResult(input: torch.Tensor, dim: int) -> torch.Tensor:
result = torch.zeros(*input.shape, input.shape[dim], device="cpu", dtype=torch.float64)
input = input.unsqueeze(-1).transpose(dim, -1)
integer_part = torch.floor(input).to(torch.int64)
decimal_part = input - integer_part
result.scatter_add_(dim, integer_part, 1 - decimal_part)
result.scatter_add_(dim, torch.ceil(input).to(torch.int64), decimal_part)
return result
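            # Worked example of scatterResult (illustrative): for input = [[0.0, 0.75]]
            # and dim=1, the fractional bin 0.75 is split linearly between its two
            # neighbouring bins -> result[0, :, 1] = [0.25, 0.75], while the exact
            # bin 0.0 receives the full weight -> result[0, :, 0] = [1.0, 0.0].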
            # Rule: each pixel on the image border corresponds to one angle; e.g. for a 512*512 image,
            # the upper half corresponds to 2*256+512-2=1022 angles.
            # Each angle draws a line from its border pixel to the image center, and every angle is
            # always the sum of 256 pixel samples.
            # When the line does not pass exactly through pixel centers, linear interpolation is applied.
            #
            # Ordering of the 1022 directions: the upper half circle runs clockwise from the leftmost to
            # the rightmost border pixel; the lower half circle runs clockwise from the rightmost to the leftmost.
with torch.no_grad():
h2, w2 = (img_size[0] - 1) / 2, (img_size[1] - 1) / 2
rangeX = torch.arange(img_size[1], device="cpu", dtype=torch.float64)
rangeY = torch.arange(img_size[0], device="cpu", dtype=torch.float64)
                # Compute: for every point on the left/right border, the y value at each x position
                # along the line from that point to the center
                lr_mat = (torch.abs(w2 - rangeX) / w2).unsqueeze(0) * (rangeY - h2).unsqueeze(1) + h2  # shape (512, 512)
lr_res = scatterResult(lr_mat, 0)
l_res = torch.cat((lr_res[:, 0:math.ceil(img_size[1] / 2)],
torch.zeros((lr_res.shape[0], img_size[1] // 2, lr_res.shape[2]), device="cpu",
dtype=torch.float64)),
dim=1)
r_res = torch.cat((torch.zeros((lr_res.shape[0], img_size[1] // 2, lr_res.shape[2]), device="cpu",
dtype=torch.float64),
lr_res[:, img_size[1] // 2:]),
dim=1)
                # Compute: for every point on the top/bottom border, the x value at each y position
                # along the line from that point to the center
                ud_mat = (torch.abs(h2 - rangeY) / h2).unsqueeze(1) * (rangeX - w2).unsqueeze(0) + w2  # shape (512, 512)
ud_res = scatterResult(ud_mat, 1)
                # Concatenate to assemble the final result
h2f, h2c = img_size[0] // 2, math.ceil(img_size[0] / 2)
vote_mask_c_up = torch.cat([l_res[:h2c, :, 1:h2f].flip([2]), ud_res[:h2c], r_res[:h2c, :, 1:h2f]],
dim=2)
vote_mask_c_down = torch.cat(
[r_res[h2f:, :, h2c:-1], ud_res[h2f:].flip([2]), l_res[h2f:, :, h2c:-1].flip([2])],
dim=2)
vote_mask_c_up = torch.cat(
[vote_mask_c_up.to(torch.float32),
torch.zeros((h2f, *vote_mask_c_up.shape[1:]), device="cpu", dtype=torch.float32)], dim=0)
vote_mask_c_down = torch.cat(
[torch.zeros((h2f, *vote_mask_c_down.shape[1:]), device="cpu", dtype=torch.float32),
vote_mask_c_down.to(torch.float32)], dim=0)
else:
raise NotImplementedError()
return vote_mask_c_up.to(device).contiguous(), vote_mask_c_down.to(device).contiguous()
def forward(self, pers):
# conv1
featmap_x = self.conv1_x(pers)
featmap_cup = self.conv1_cup(pers)
featmap_cdown = self.conv1_cdown(pers)
featmap_y = self.conv1_y(pers)
hough_x_vote = featmap_x.sum(dim=2, keepdim=True)
hough_x_vote_reshape = hough_x_vote.reshape(hough_x_vote.shape[0], -1, self.pers_h)
hough_y_vote = featmap_y.sum(dim=3, keepdim=True)
hough_y_vote_reshape = hough_y_vote.reshape(hough_y_vote.shape[0], -1, self.pers_h)
        # Center-line voting
if self.vote_mask_c_up is None:
self.makeVoteMask(featmap_cup.shape[2:4], featmap_cup.device)
hough_c_up_vote = torch.matmul(featmap_cup.reshape(*featmap_cup.shape[0:2], -1), self.vote_mask_c_up)
hough_c_down_vote = torch.matmul(featmap_cdown.reshape(*featmap_cdown.shape[0:2], -1), self.vote_mask_c_down)
# conv2: conv in hough space
hough_feat = torch.cat(
[self.conv2_x(hough_x_vote_reshape.unsqueeze(-1)), self.conv2_y(hough_y_vote_reshape.unsqueeze(-1))],
dim=3)
hough_feat_cud = torch.cat(
[self.conv2_cup(hough_c_up_vote.unsqueeze(-1)), self.conv2_cdown(hough_c_down_vote.unsqueeze(-1))],
dim=3)
return hough_feat, hough_feat_cud
def __repr__(self):
return "FeatureShape(H={}, W={}, C={}), Perspective Length (distance_bin_num={}, fov={})".format(
self.input_h, self.input_w, self.input_feat, self.pers_h, self.fov)
class HoughNewUpSampler(nn.Module):
def __init__(self, upsample_rate: int):
super().__init__()
self.ul = nn.Upsample(scale_factor=(upsample_rate, 1), mode='bilinear', align_corners=False)
self.um = nn.Upsample(scale_factor=(upsample_rate, 1), mode='bilinear', align_corners=False)
self.ur = nn.Upsample(scale_factor=(upsample_rate, 1), mode='bilinear', align_corners=False)
def forward(self, x):
        # Only handles even-sized square images (the original width and height are not
        # passed in, so the upsampling assumes a square image here)
        assert (x.shape[2] + 2) % 4 == 0, "only even-sized square images are supported"
img_half_size = (x.shape[2] + 2) // 4
l = self.ul(x[:, :, 0:img_half_size])
m = self.um(x[:, :, img_half_size - 1:3 * img_half_size - 1])
r = self.ur(x[:, :, 3 * img_half_size - 2:])
return torch.cat([l[:, :, :-1], m, r[:, :, 1:]], dim=2)
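# Size arithmetic for HoughNewUpSampler (illustrative): for a square view of side s,
# the center-line axis has 2*s - 2 bins. With s = 128 the input has 254 bins and
# img_half_size = (254 + 2) // 4 = 64; the three overlapping slices l/m/r are upsampled
# separately, and trimming one row from l and from r when re-concatenating gives
# (64u - 1) + 128u + (64u - 1) = 256u - 2 = 2*(s*u) - 2 bins for upsample rate u,
# i.e. exactly the center-line axis length at the upsampled resolution.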
class FusionHoughStage(nn.Module):
def __init__(self, cfg, type: str, c_ori, c0, c1, c2, c3, c4, upsample_rate=None, upsampler_class=None):
super(FusionHoughStage, self).__init__()
self.type = type
self.cfg = cfg
if upsample_rate is None:
upsample_rate = [2, 4, 8, 8, 8]
def getSampler(u):
if u == 1:
return nn.Identity()
elif upsampler_class is not None:
return upsampler_class(u)
else:
return nn.Upsample(scale_factor=(u, 1), mode='bilinear', align_corners=False)
self.upsamplers = nn.ModuleList([
getSampler(u) for u in upsample_rate
])
self.c_total = c0 + c1 + c2 + c3 + c4
self.conv1 = nn.Sequential(
nn.Conv2d(self.c_total, self.c_total // 2, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(self.c_total // 2),
nn.ReLU(),
nn.Conv2d(self.c_total // 2, self.c_total // 2, kernel_size=(1, 1), padding=(0, 0)),
nn.BatchNorm2d(self.c_total // 2),
nn.ReLU(),
nn.Conv2d(self.c_total // 2, 1, kernel_size=(1, 1), padding=(0, 0)),
)
self.conv1_2 = nn.Sequential(
nn.Conv2d(self.c_total, self.c_total // 2, kernel_size=(3, 1), padding=(1, 0)),
nn.BatchNorm2d(self.c_total // 2),
nn.ReLU(),
nn.Conv2d(self.c_total // 2, self.c_total // 2, kernel_size=(1, 1), padding=(0, 0)),
nn.BatchNorm2d(self.c_total // 2),
nn.ReLU(),
nn.Conv2d(self.c_total // 2, 1, kernel_size=(1, 1), padding=(0, 0)),
)
self.use_different_conv1 = True
# self.conv2 = nn.Sequential(nn.Conv2d(self.c_total // 2, 1, kernel_size=(1, 1), padding=(0, 0), bias=False))
def forward(self, x):
concat_feat = torch.cat([sam(t) for t, sam in zip(x, self.upsamplers)], 1)
if self.use_different_conv1:
feat = torch.cat([self.conv1(concat_feat[:, :, :, 0:1]), self.conv1_2(concat_feat[:, :, :, 1:2])], dim=3)
else:
feat = self.conv1(concat_feat)
prob = feat # self.conv2(feat)
# concat_feat = concat_feat.permute(0,2,1,3).reshape(f_ori.shape[0], 256, -1)
# prob = self.linear(concat_feat).unsqueeze(1)
return prob
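# Minimal wiring sketch (illustrative; assumes a cfg EasyDict with
# MODEL.HOUGH.CLINE_TYPE = "NEW" and the default CONV1_CHANNEL_FACTOR = 2):
#
#     hough = PerspectiveE2PP2E(cfg, 128, 128, 128, fov=160, input_feat=64, output_feat=1)
#     feat_xy, feat_cud = hough(torch.randn(2, 64, 128, 128))
#     # feat_xy:  (2, 32, 128, 2) -- x and y peak votes stacked on the last dim
#     # feat_cud: (2, 32, 254, 2) -- up/down center-line votes (254 = 2*128 - 2 bins)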
| 12,016 | 49.491597 | 117 | py |
DMH-Net | DMH-Net-main/eval.py | import argparse
import json
import os
# import ipdb
import sys
import time
import warnings
from pathlib import Path
import cv2
import numpy as np
import torch
import torch.nn as nn
import yaml
from torch.utils.data import DataLoader
from tqdm import trange
from config import cfg, cfg_from_yaml_file, cfg_from_list, merge_new_config
from misc.utils import pipeload
from model import ENCODER_RESNET, ENCODER_DENSENET, ENCODER_HOUGH, DMHNet
from perspective_dataset import PerspectiveDataset, worker_init_fn
from postprocess.postprocess2 import postProcess
from visualization import visualizeWithPostResults
from torch.nn import functional as F
SAVE_JSON = False
def valid(cfg, net, loader_valid, dataset_valid, device, visualize_count=0, show=False, dpi=None, print_detail=False,
valid_epoch=None):
"""
验证用的核心函数
:param valid_epoch None表示是test,数字表示是valid,值表示触发valid的epoch number
"""
net.eval()
# torch.cuda.empty_cache()
iterator_valid = iter(loader_valid)
valid_loss = {}
metrics = {}
metrics_by_corner = {}
imgs = {}
visualize_index = np.zeros(len(loader_valid), dtype=bool)
visualize_index[np.random.choice(len(loader_valid), size=visualize_count, replace=False)] = True
for valid_idx in trange(len(loader_valid), desc='Eval', position=2):
input = next(iterator_valid)
valid_batch_size = input["e_img"].size(0)
with torch.no_grad():
for k in input:
if isinstance(input[k], torch.Tensor):
input[k] = input[k].to(device)
losses, results_dict = net(input)
postResults = []
for i in range(len(input["filename"])):
print(input["filename"][i])
postStartTime = time.time()
postResult = postProcess(cfg, input, results_dict, i, is_valid_mode=valid_epoch is not None)
postResults.append(postResult)
if print_detail:
(_, gt_lwh, _), (_, pred_lwh, _), metric = postResult
print("{:s} pred{:s} gt{:s} {:s}".format(str(metric), str(pred_lwh), str(gt_lwh),
input["filename"][i]))
if SAVE_JSON or ("args" in globals() and args.print_json):
(_, _, _), (_, _, pred_cors), metric = postResult
uv = pred_cors.cpu().numpy() / input["e_img"].shape[-1:-3:-1]
uv = [[o.item() for o in pt] for pt in uv]
if SAVE_JSON:
JSON_DIR = "./result_json"
os.makedirs(JSON_DIR, exist_ok=True)
with open(os.path.join(JSON_DIR, input["filename"][i] + ".json"), "w") as f:
json.dump({"uv": uv, "3DIoU": metric["3DIoU"].item()}, f)
elif "args" in globals() and args.print_json:
print(json.dumps({"uv": uv, "3DIoU": metric["3DIoU"].item()}))
_, _, metric = postResult
for k, v in metric.items():
if isinstance(v, str): continue
metrics[k] = metrics.get(k, 0) + v.item()
if "n_corners_type" in metric:
k2 = metric["n_corners_type"] + "/" + k
if k2 not in metrics_by_corner: metrics_by_corner[k2] = []
metrics_by_corner[k2].append(v.item())
metrics["gt_n_corners"] = metrics.get("gt_n_corners", 0) + (len(input["cor"][i]) // 2)
for k, v in losses.items():
valid_loss[k] = valid_loss.get(k, 0) + v.item() * valid_batch_size
if visualize_index[valid_idx]:
visualize_type = cfg.get("VISUALIZATION", {}).get("TYPE")
imgs.update(visualizeWithPostResults(cfg, input, results_dict, postResults, drawtypes=visualize_type,
show=show, dpi=dpi))
for k, v in valid_loss.items():
valid_loss[k] = v / len(dataset_valid)
for k, v in metrics.items():
metrics[k] = v / len(dataset_valid)
for k, v in metrics_by_corner.items():
metrics[k] = torch.tensor(v).mean().item()
return valid_loss, imgs, metrics
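# Illustrative call of valid() (mirrors the __main__ block below):
#     valid_loss, imgs, metrics = valid(cfg, net, loader_valid, dataset_valid, device,
#                                       visualize_count=0, show=False, dpi=200)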
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cfg_file', type=str, required=True, help='specify the config for training')
parser.add_argument('--ckpt', required=True, help='checkpoint for evaluation')
parser.add_argument('--visu_count', default=0, type=int, help='visualize how many batches')
parser.add_argument('--visu_all', action='store_true', help='visualize all samples')
parser.add_argument('--visu_path', help='where to save the visualization result (default: plt.show)')
parser.add_argument('--visu_type',
help='specify visualization type (either str or List[str], see visualization.py)')
parser.add_argument('--no_post_process', action='store_true', help='don\'t post process')
parser.add_argument('--develop_post_process', action='store_true', help='use POST_PROCESS.METHOD = \'develop\'')
parser.add_argument('--valid_set', action='store_true', help='use valid set')
parser.add_argument('--batch_size', default=2, type=int, help='mini-batch size')
parser.add_argument('--input_file', type=str, help='eval on one single input image')
parser.add_argument('--print_detail', action='store_true', help='print detail for each sample')
parser.add_argument('--save_json', action='store_true', help='save json to ./result_json')
parser.add_argument('--print_json', action='store_true', help='print json for each sample')
parser.add_argument('--output_file', nargs="?", const=True,
                        help='whether to output to a file; if no value is given, defaults to eval_outputs/{time}.out')
# Model related
parser.add_argument('--backbone',
default='drn38',
choices=ENCODER_RESNET + ENCODER_DENSENET + ENCODER_HOUGH,
help='backbone of the network')
parser.add_argument('--no_rnn', action='store_true', help='whether to remove rnn or not')
# Dataset related arguments
    # TODO: the original code swapped the test and training sets; there is no validation set
    # The new code uses the original training and test sets
# parser.add_argument('--train_root_dir',
# default='data/layoutnet_dataset/test',
# help='root directory to training dataset. '
# 'should contains img, label_cor subdirectories')
parser.add_argument('--valid_root_dir',
default='data/layoutnet_dataset/train',
help='root directory to validation dataset. '
'should contains img, label_cor subdirectories')
parser.add_argument('--num_workers', default=4 if not sys.gettrace() else 0, type=int,
help='numbers of workers for dataloaders')
# Misc arguments
parser.add_argument('--no_cuda', action='store_true', help='disable cuda')
parser.add_argument('--seed', default=594277, type=int, help='manual seed')
parser.add_argument('--disp_iter', type=int, default=1, help='iterations frequency to display')
parser.add_argument('--no_multigpus', action='store_true', help='disable data parallel')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
args = parser.parse_args()
if args.save_json: SAVE_JSON = True
cfg_from_yaml_file(args.cfg_file, cfg)
cfg.TAG = Path(args.cfg_file).stem
cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
output_file = args.output_file if args.output_file != True else "eval_outputs/{:d}.out".format(int(time.time()))
if output_file is not None:
os.makedirs(os.path.dirname(output_file), exist_ok=True)
output_file = open(output_file, "w")
if args.visu_type:
merge_new_config(cfg, {"VISUALIZATION": {"TYPE": yaml.safe_load(args.visu_type)}})
if args.no_post_process:
cfg.POST_PROCESS.METHOD = "None"
elif args.develop_post_process:
cfg.POST_PROCESS.METHOD = "develop"
device = torch.device('cpu' if args.no_cuda else 'cuda')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
result_dir = os.path.join("eval_result", str(int(time.time())))
os.makedirs(result_dir, exist_ok=True)
# Create dataloader
print("num_workers: " + str(args.num_workers))
dataset_valid = PerspectiveDataset(cfg, "test" if not args.valid_set else "valid", # TODO 新代码现在是用测试集进行验证的
filename=args.input_file)
loader_valid = DataLoader(dataset_valid,
args.batch_size,
collate_fn=dataset_valid.collate,
shuffle=False,
drop_last=False,
num_workers=args.num_workers,
pin_memory=not args.no_cuda,
worker_init_fn=worker_init_fn)
# Create model
net = DMHNet(cfg, cfg.MODEL.get("BACKBONE", {}).get("NAME", "drn38"), not args.no_rnn).to(device)
if not args.no_multigpus:
net = nn.DataParallel(net) # multi-GPU
print(str(cfg.POST_PROCESS))
if output_file: output_file.write(str(cfg.POST_PROCESS) + "\n\n")
if args.ckpt == "None":
warnings.warn("ckpt参数显式传入了None!将不会加载任何参数!")
else:
state_dict = pipeload(args.ckpt, map_location='cpu')["state_dict"]
net.load_state_dict(state_dict, strict=True)
visualize_count = len(loader_valid) if args.visu_all else args.visu_count
show = args.visu_path is None
valid_loss, imgs, metrics = valid(cfg, net, loader_valid, dataset_valid, device, visualize_count, show=show,
dpi=200, print_detail=args.print_detail)
for k, v in valid_loss.items():
k = 'eval_loss/%s' % k
print("{:s} {:f}".format(k, v))
if output_file: output_file.write("{:s} {:f}".format(k, v) + "\n")
for k, v in metrics.items():
k = 'metric/%s' % k
print("{:s} {:f}".format(k, v))
if output_file: output_file.write("{:s} {:f}".format(k, v) + "\n")
if output_file:
output_file.write("\n\n")
output_file.write(str(cfg) + "\n")
for k, v in imgs.items():
if args.visu_path:
os.makedirs(args.visu_path, exist_ok=True)
success = cv2.imwrite(os.path.join(args.visu_path, k + ".jpg"), cv2.cvtColor(v, cv2.COLOR_RGB2BGR))
assert success, "write output image fail!"
| 10,926 | 45.300847 | 117 | py |
DMH-Net | DMH-Net-main/perspective_dataset.py | import os
import warnings
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
from easydict import EasyDict
from scipy.spatial.distance import cdist
from shapely.geometry import LineString
from torch.utils.data._utils.collate import default_collate
from torchvision.transforms import transforms
from e2plabel.e2plabelconvert import generatePerspective, linesPostProcess
from misc import panostretch
from misc import post_proc
class PerspectiveDataset(data.Dataset):
def __init__(self,
cfg: EasyDict,
split: str,
filename=None,
train_mode=False):
self.cfg = cfg
self.train_mode = train_mode
        self.rotate = None  # rotate  # TODO: check against the original code for how rotate was implemented
self.H, self.W = (512, 1024) if "IMG_SIZE" not in cfg.DATA else cfg.DATA.IMG_SIZE
self.FOV = 90 if "FOV" not in cfg.DATA else cfg.DATA.FOV
self.P = 512 if "PERSPECTIVE_SIZE" not in cfg.DATA else cfg.DATA.PERSPECTIVE_SIZE
self.bin_num = 512
self.hough_label_gradual_type = "exp"
        # e2p parameter settings; see e2plabelconvert.py for details
self.view_args = [
[(self.FOV, self.FOV), 0, 0],
[(self.FOV, self.FOV), 90, 0],
[(self.FOV, self.FOV), 180, 0],
[(self.FOV, self.FOV), -90, 0],
[(self.FOV, self.FOV), 0, 90],
[(self.FOV, self.FOV), 0, -90],
]
self.view_name = ['F', 'R', 'B', 'L', 'U', 'D']
self.view_size = (self.P, self.P)
self.ch = -1.6
self.randomEraser = transforms.RandomErasing()
# self._check_dataset()
root_dir = cfg.DATA.ROOT_DIR
self.path = os.path.join(root_dir, split)
self.img_dir = os.path.join(self.path, 'img')
self.cor_dir = os.path.join(self.path, 'label_cor')
self.img_fnames = sorted([fname for fname in os.listdir(self.img_dir)])
if cfg.DATA.get("PREFIX") is not None:
self.img_fnames = [fname for fname in self.img_fnames
                               if sum([1 if fname.find(p) == 0 else 0 for p in cfg.DATA.PREFIX]) > 0  # keep only images matching one of the prefixes
]
        if filename is not None:  # case where only a single image is used
self.img_fnames = [filename]
        # Read each image's corner label data and cache it in memory for a second round of filtering
self.cors = []
for filename in self.img_fnames:
with open(os.path.join(self.cor_dir, filename[:-4] + ".txt")) as f:
cor = np.array([line.strip().split() for line in f if line.strip()], np.float32)
self.cors.append(cor)
        # If the config constrains the number of corners, apply the filter
if cfg.DATA.get("USE_CORNER"):
corner_count = [len(cor) // 2 for cor in self.cors]
mask = [count in cfg.DATA.USE_CORNER for count in corner_count]
self.img_fnames = [v for v, m in zip(self.img_fnames, mask) if m]
self.cors = [v for v, m in zip(self.cors, mask) if m]
# # TODO
# try:
# idx = self.img_fnames.index("TbHJrupSAjP_235d08ff9f3f40ce9fa9e97696265dda.png")
# except:
# idx = 0
# self.img_fnames = self.img_fnames[idx:idx + 1] * 1000
# self.cors = self.cors[idx: idx + 1] * 1000
# a = 1
def _check_dataset(self):
for fname in self.txt_fnames:
assert os.path.isfile(os.path.join(self.cor_dir, fname)), \
'%s not found' % os.path.join(self.cor_dir, fname)
# for fname in self.pkl_fnames:
# assert os.path.isfile(os.path.join(self.label_p_dir, fname)), \
# '%s not found' % os.path.join(self.label_p_dir, fname)
def __len__(self):
return len(self.img_fnames)
def __getitem__(self, idx):
return self.getItem(self.img_fnames[idx], self.cors[idx])
def getItem(self, filename, cor=None):
        # TODO: the current implementation generates the labels and the six face views dynamically from e_img
        # Read the original image and the corner label data
img_path = os.path.join(self.img_dir, filename)
e_img = np.array(Image.open(img_path), np.float32)[..., :3] / 255.
if cor is None:
with open(os.path.join(self.cor_dir, filename[:-4] + ".txt")) as f:
cor = np.array([line.strip().split() for line in f if line.strip()], np.float32)
# fname = self.img_fnames[idx]
# P = self.P
# l = np.tan(np.deg2rad(self.FOV / 2))
# bin_num = self.bin_num
H = self.H
W = self.W
        # Use cor to make a smooth angle label
# Corner with minimum x should at the beginning
cor = np.roll(cor[:, :2], -2 * np.argmin(cor[::2, 0]), 0)
# # Detect occlusion
# occlusion = find_occlusion(cor[::2].copy()).repeat(2)
AUG_RECORD = filename + " "
        # Data augmentation
        # Augmentation is enabled only in train_mode
if self.train_mode or self.cfg.get("TEST_NEED_AUG", False):
# Stretch augmentation(把图片、label均进行缩放)
if self.cfg.DATA.AUGMENT.get("stretch"):
max_stretch = self.cfg.DATA.AUGMENT.stretch
                if max_stretch == True: max_stretch = 2.0  # default value
xmin, ymin, xmax, ymax = cor2xybound(cor)
kx = np.random.uniform(0.5, max_stretch)
ky = np.random.uniform(0.5, max_stretch)
a = np.random.randint(2)
b = np.random.randint(2)
if a == 0:
kx = max(1 / kx, min(0.5 / xmin, 1.0))
else:
kx = min(kx, max(10.0 / xmax, 1.0))
if b == 0:
ky = max(1 / ky, min(0.5 / ymin, 1.0))
else:
ky = min(ky, max(10.0 / ymax, 1.0))
e_img, cor, _ = panostretch.pano_stretch(e_img, cor, kx, ky)
AUG_RECORD += "estre{:f}{:f}{:d}{:d} ".format(kx, ky, a, b)
# Random flip
if self.cfg.DATA.AUGMENT.get("flip") and np.random.randint(2) == 0:
e_img = np.flip(e_img, axis=1).copy()
cor[:, 0] = e_img.shape[1] - 1 - cor[:, 0]
AUG_RECORD += "efilp "
# Random erase in random position
if self.cfg.DATA.AUGMENT.get("erase") and np.random.randint(
self.cfg.DATA.AUGMENT.get("erase_EVERY", 2)) == 0:
# H, W = e_img.shape[:2]
n_holes = np.random.randint(self.cfg.DATA.AUGMENT.get("erase_COUNT", 10))
hole_length_y = self.cfg.DATA.AUGMENT.get("erase_SIZE", 50)
hole_length_x = self.cfg.DATA.AUGMENT.get("erase_SIZE", 50)
mask = np.ones((H, W, 3), np.float32)
noise = np.zeros((H, W, 3), np.float32)
for n in range(n_holes):
xhole = np.random.randint(W)
yhole = np.random.randint(H)
yhole1 = np.clip(yhole - hole_length_y // 2, 0, H)
yhole2 = np.clip(yhole + hole_length_y // 2, 0, H)
xhole1 = np.clip(xhole - hole_length_x // 2, 0, W)
xhole2 = np.clip(xhole + hole_length_x // 2, 0, W)
mask[yhole1:yhole2, xhole1:xhole2] = 0
noise[yhole1:yhole2, xhole1:xhole2] = np.random.rand(yhole2 - yhole1, xhole2 - xhole1, 3)
e_img = e_img * mask # + noise
if self.cfg.DATA.AUGMENT.get("bon_erase"):
# H, W = img.shape[:2]
n_holes = self.cfg.DATA.AUGMENT.get("erase_COUNT", 10) # 10
hole_length_y = self.cfg.DATA.AUGMENT.get("erase_SIZE", 50) # 50
hole_length_x = self.cfg.DATA.AUGMENT.get("erase_SIZE_X", 100) # 100
mask = np.ones((H, W, 3), np.float32)
noise = np.zeros((H, W, 3), np.float32)
bon_floor_x, bon_floor_y = cor[1::2, 0], cor[1::2, 1]
bon_ceil_x, bon_ceil_y = cor[0::2, 0], cor[0::2, 1]
bon_floor = np.interp(np.arange(W),
bon_floor_x,
bon_floor_y,
period=W)
bon_ceil = np.interp(np.arange(W), bon_ceil_x, bon_ceil_y, period=W)
for n in range(n_holes):
xhole = np.random.randint(W)
if True: # self.bon_erase:
if n % 2 == 0:
yhole = int(bon_floor[xhole])
else:
yhole = int(bon_ceil[xhole])
else: # if self.erase:
yhole = np.random.randint(H)
yhole1 = np.clip(yhole - hole_length_y // 2, 0, H)
yhole2 = np.clip(yhole + hole_length_y // 2, 0, H)
xhole1 = np.clip(xhole - hole_length_x // 2, 0, W)
xhole2 = np.clip(xhole + hole_length_x // 2, 0, W)
mask[yhole1:yhole2, xhole1:xhole2] = 0
noise[yhole1:yhole2,
xhole1:xhole2] = np.random.rand(yhole2 - yhole1,
xhole2 - xhole1, 3)
e_img = e_img * mask + noise
# Random gamma augmentation
if self.cfg.DATA.AUGMENT.get("gamma"):
p = np.random.uniform(1, 2)
if np.random.randint(2) == 0:
p = 1 / p
e_img = e_img ** p
# Random noise augmentation
if self.cfg.DATA.AUGMENT.get("noise"):
if np.random.randint(2) == 0:
noise = np.random.randn(*e_img.shape) * 0.05
e_img = np.clip(e_img + noise, 0, 1)
        # TODO: save the processed dataset
# save_dir = "processed_input/4_stf"
# os.makedirs(save_dir, exist_ok=True)
# cv2.imwrite(os.path.join(save_dir, filename),
# cv2.cvtColor(np.round(e_img * 255).astype(np.uint8), cv2.COLOR_RGB2BGR))
        # e_img augmentation ends here; start generating the data used as ground truth
pres = generatePerspective(e_img, cor, self.view_name, self.view_args, self.view_size)
        # Parse the contents of pres and convert them into label vectors
p_imgs = []
xLabels, yLabels, cUpLabels, cDownLabels = [], [], [], []
peakss = []
liness = []
for d in pres:
p_img = torch.FloatTensor(d["img"].transpose([2, 0, 1]))
if self.train_mode or self.cfg.get("TEST_NEED_AUG", False):
AUG_RECORD += d["name"] + " "
                # Stretch augmentation (scales both the image and the labels)
if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("stretch") and np.random.randint(2) == 0:
max_stretch = self.cfg.DATA.PERSPECTIVE_AUGMENT.stretch
                    if max_stretch == True: max_stretch = 1.5  # default value
stretch_value = np.random.uniform(1.0, max_stretch)
originSize = p_img.shape[1:]
newSize = [round(v * stretch_value) for v in originSize]
ratio = np.array([n / o for o, n in zip(originSize, newSize)])
centerPos = np.array([(v - 1) // 2 for v in originSize])
p_img = transforms.Resize(newSize)(p_img)
p_img = transforms.CenterCrop(originSize)(p_img)
for line in d["lines"]:
line[3:5] = (line[3:5] - centerPos) * ratio + centerPos
line[5:7] = (line[5:7] - centerPos) * ratio + centerPos
AUG_RECORD += "stre{:f} ".format(stretch_value)
# Random flip
if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("vertical_flip") and np.random.randint(3) == 0:
p_img = p_img.flip(-2)
for line in d["lines"]:
line[4:7:2] = p_img.shape[-2] - 1 - line[4:7:2]
AUG_RECORD += "vert "
if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("horizontal_flip") and np.random.randint(3) == 0:
p_img = p_img.flip(-1)
for line in d["lines"]:
line[3:7:2] = p_img.shape[-1] - 1 - line[3:7:2]
AUG_RECORD += "hori "
rotated = False
if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("rotation"):
r = np.random.randint(6)
if r == 0:
                        # rotate 90 degrees clockwise
                        # transpose first, then flip horizontally
p_img = p_img.transpose(-2, -1).flip(-1)
for line in d["lines"]:
line[3:7] = line[[4, 3, 6, 5]]
line[3:7:2] = p_img.shape[-1] - 1 - line[3:7:2]
rotated = True
AUG_RECORD += "rota0 "
elif r == 1:
                        # rotate 90 degrees counterclockwise
                        # transpose first, then flip vertically
p_img = p_img.transpose(-2, -1).flip(-2)
for line in d["lines"]:
line[3:7] = line[[4, 3, 6, 5]]
line[4:7:2] = p_img.shape[-2] - 1 - line[4:7:2]
rotated = True
AUG_RECORD += "rota1 "
# Random erase in random position
if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("erase"):
p_img = self.randomEraser(p_img)
                    # labels are not affected by erasing
# Random gamma augmentation
if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("gamma"):
p = np.random.uniform(1, 2)
if np.random.randint(2) == 0:
p = 1 / p
p_img = p_img ** p
# Random noise augmentation
if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("noise") and np.random.randint(2) == 0:
noise = torch.randn(p_img.shape) * 0.025
p_img = torch.clip(p_img + noise, 0, 1)
oldDirection = [vec[7] for vec in d["lines"]]
d["lines"], mask = linesPostProcess(d["lines"], p_img.shape[1:], d["name"] == "U" or d["name"] == "D",
return_mask=True)
oldDirection = [v for v, b in zip(oldDirection, mask) if b]
if rotated and not (d["name"] == "U" or d["name"] == "D"):
                # When a middle (non-U/D) view was rotated, adjust using oldDirection: lines that were 0 become 1 (computed value 0), and lines that were 1 become 0 (computed value 2)
for i, (oldValue, newValue) in enumerate(zip(oldDirection, [vec[7] for vec in d["lines"]])):
if oldValue == 0:
assert newValue == 0
d["lines"][i][7] = 1
elif oldValue == 1:
                            # assert newValue == 2  # not needed here: a perfectly vertical line gives yLR=nan and still outputs direct=1.
                            assert newValue != 0  # so newValue may be 1 instead of 2; just assert it is not 0
d["lines"][i][7] = 0
p_imgs.append(p_img)
if self.cfg.MODEL.HOUGH.CLINE_TYPE == "NEW":
liness.append(np.array(d["lines"]))
try:
peaks = self.linesToPeaksNew(d["lines"], self.view_size)
except:
assert False, AUG_RECORD
peakss.append(peaks)
peaks_for_label = peaks
label_hw = self.view_size
xPeaks, yPeaks, cUpPeaks, cDownPeaks = peaks_for_label
xLabels.append(
self.generate_gradual_hough_label(xPeaks, label_hw[1], type=self.hough_label_gradual_type,
base=self.cfg.MODEL.HOUGH.GRADUAL_LABEL.XY))
yLabels.append(
self.generate_gradual_hough_label(yPeaks, label_hw[0], type=self.hough_label_gradual_type,
base=self.cfg.MODEL.HOUGH.GRADUAL_LABEL.XY))
cline_angle_num = label_hw[1] + label_hw[0] // 2 * 2 - 2
cUpLabels.append(
self.generate_gradual_hough_label(cUpPeaks, cline_angle_num, type=self.hough_label_gradual_type,
base=self.cfg.MODEL.HOUGH.GRADUAL_LABEL.CUPDOWN))
cDownLabels.append(
self.generate_gradual_hough_label(cDownPeaks, cline_angle_num, type=self.hough_label_gradual_type,
base=self.cfg.MODEL.HOUGH.GRADUAL_LABEL.CUPDOWN))
else:
raise NotImplementedError()
n_cor = len(cor)
gt_floor_coor = cor[1::2]
gt_ceil_coor = cor[0::2]
gt_floor_xyz = np.hstack([
post_proc.np_coor2xy(gt_floor_coor, self.ch, self.W, self.H, floorW=1, floorH=1),
np.zeros((n_cor // 2, 1)) + self.ch,
])
gt_c = np.sqrt((gt_floor_xyz[:, :2] ** 2).sum(1))
gt_v2 = post_proc.np_coory2v(gt_ceil_coor[:, 1], self.H)
gt_ceil_z = gt_c * np.tan(gt_v2)
height = np.array([gt_ceil_z.mean() - self.ch], dtype=np.float32)
# Convert all data to tensor
e_img = torch.FloatTensor(e_img.transpose([2, 0, 1]))
# angle = torch.FloatTensor(angle)
# up_bin256 = torch.FloatTensor(up_bin256.copy())
# down_bin256 = torch.FloatTensor(down_bin256.copy())
height = torch.FloatTensor(height)
out_dict = {
"filename": filename,
"e_img": e_img,
"cor": cor,
"height": height,
"p_imgs": torch.stack(p_imgs, 0),
"xLabels": np.array(xLabels).astype(np.float32),
"yLabels": np.array(yLabels).astype(np.float32),
"cUpLabels": np.array(cUpLabels).astype(np.float32),
"cDownLabels": np.array(cDownLabels).astype(np.float32),
"peaks": peakss,
"lines": liness
}
return out_dict
@staticmethod
def generate_gradual_hough_label(peaks, res_len, loop=False, type="exp", base=0.96):
"""
根据若干个峰值点,生成渐变的数组,越靠近峰值点值越大,从而用于网络的直接计算loss。
:param peaks 数组,各个峰值点
:param res_len 结果数组的长度
:param loop 计算结果距离的时候是否视为是一个循环
"""
res = []
        res.append(cdist(peaks.reshape(-1, 1), np.arange(res_len, dtype=np.float64).reshape(-1, 1), p=1))
if loop:
res.append(cdist(peaks.reshape(-1, 1), np.arange(res_len).reshape(-1, 1) + res_len, p=1))
res.append(cdist(peaks.reshape(-1, 1), np.arange(res_len).reshape(-1, 1) - res_len, p=1))
dist = np.min(res, 0)
if dist.shape[0] > 0:
nearest_dist = dist.min(0) # shape(res_len),每个点距离最近的peak的距离
else:
            # TODO: how should the no-peak case be handled? Is treating the distance as inf reasonable? The label then becomes all zeros
nearest_dist = np.ones(dist.shape[1:], dtype=dist.dtype) * np.inf
if type == "exp":
return (base ** nearest_dist).reshape(-1)
elif type == "nearest_only":
return (nearest_dist.reshape(-1) <= 0.5).astype(nearest_dist.dtype)
elif type == "nearest_k":
return (nearest_dist.reshape(-1) <= base + 0.5).astype(nearest_dist.dtype)
else:
raise NotImplementedError()
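    # Worked example (illustrative): generate_gradual_hough_label(np.array([2.0]), 5,
    # type="exp", base=0.5) -> [0.25, 0.5, 1.0, 0.5, 0.25], i.e. each entry equals
    # base ** (distance to the nearest peak).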
def linesToPeaks(self, lines, img_hw):
"""
:return xPeaks, yPeaks, cUpPeaks(过中心的上半圈线), cDownPeaks(过中心的下半圈线)
"""
xPeaks, yPeaks, cUpPeaks, cDownPeaks = [], [], [], []
for line in lines:
if line[7] == 0:
xPeaks.append(np.mean(line[3:7:2]))
elif line[7] == 1:
yPeaks.append(np.mean(line[4:7:2]))
elif line[7] == 2:
yCenter = np.mean(line[4:7:2])
ks = (line[4:7:2] - ((img_hw[0] - 1) / 2)) / (line[3:7:2] - ((img_hw[1] - 1) / 2))
                # The angle is defined as arctan of the slope: in the upper half circle, 0 degrees at the leftmost and growing clockwise to 180; in the lower half circle, 0 degrees at the rightmost and growing clockwise to 180
deg = np.rad2deg(np.arctan(ks))
deg[deg < 0] += 180
meanAngleDeg = np.mean(deg)
if yCenter <= img_hw[0] / 2:
cUpPeaks.append(meanAngleDeg)
else:
cDownPeaks.append(meanAngleDeg)
return np.array(xPeaks), np.array(yPeaks), np.array(cUpPeaks), np.array(cDownPeaks)
@staticmethod
def coord2AngleValue(x, y, img_hw):
"""
根据所属区域,求交线坐标,并直接转换为角度数量值
:param x,y 直接输入图片中的坐标即可,不是中心坐标系
:return 对应于new算法的angle值;0或1,表示上半图还是下半图
"""
h2, w2 = (img_hw[0] - 1) / 2, (img_hw[1] - 1) / 2
h2f = img_hw[0] // 2
x = x - w2
y = y - h2
if x <= y <= -x:
            # intersects the left border
y2 = y / x * -w2
if y <= 0:
r = h2f - 1 - h2 - y2
return r, 0
else:
r = h2f + img_hw[1] - 2 + h2 - y2
return r, 1
elif -x <= y <= x:
            # intersects the right border
y2 = y / x * w2
if y <= 0:
r = h2f + img_hw[1] - 2 + h2 + y2
return r, 0
else:
r = h2f - 1 - h2 + y2
return r, 1
elif -y < x < y:
            # intersects the bottom border
x2 = x / y * h2
r = h2f - 1 + w2 - x2
return r, 1
elif y < x < -y:
            # intersects the top border
x2 = x / y * -h2
r = h2f - 1 + w2 + x2
return r, 0
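    # Worked example (illustrative): for img_hw = (512, 512), the point (0, 100) lies on
    # the left border above the center, so coord2AngleValue(0, 100, (512, 512)) returns
    # (155.0, 0), i.e. angle bin 155 in the upper half, matching the bin ordering built
    # by PerspectiveE2PP2E.makeVoteMaskStatic.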
@staticmethod
def linesToPeaksNewCore(lines, img_hw):
"""
        :input: lines (n, 5); the five dims are x1, y1, x2, y2, and the line type in the view: 0 vertical, 1 horizontal, 2 center-crossing
        :return xPeaks, yPeaks, cUpPeaks (center-crossing lines in the upper half), cDownPeaks (center-crossing lines in the lower half)
"""
def autoAbs(v):
if isinstance(v, torch.Tensor):
return v.abs()
elif isinstance(v, np.ndarray):
return np.abs(v)
return abs(v)
def toNdarrayOrTensor(v, ref):
if isinstance(ref, torch.Tensor):
return ref.new_tensor(v)
else:
return np.array(v)
xPeaks, yPeaks, cUpPeaks, cDownPeaks = [], [], [], []
xLengths, yLengths, cUpLengths, cDownLengths = [], [], [], []
for line in lines:
length_ratio = autoAbs(line[0:2] - line[2:4]) / toNdarrayOrTensor(img_hw, line)[[1, 0]]
if line[4] == 0:
xPeaks.append(line[0:4:2].mean())
xLengths.append(length_ratio[1])
elif line[4] == 1:
yPeaks.append(line[1:4:2].mean())
yLengths.append(length_ratio[0])
elif line[4] == 2:
                # For both endpoints, compute the corresponding angle (in border-coordinate units); their arithmetic mean is used as the final representative angle
r1, p1 = PerspectiveDataset.coord2AngleValue(*line[0:2], img_hw)
r2, p2 = PerspectiveDataset.coord2AngleValue(*line[2:4], img_hw)
if p1 != p2:
warnings.warn("cline two endpoint is not in same updown part!")
midPointDis = np.abs(line[1::2] - ((img_hw[0] - 1) / 2))
                if midPointDis.argmin() == 0:  # endpoint 1 should be adjusted
                    if p2 == 0:  # endpoint 2 is in the upper half, so move endpoint 1 to the upper half too
line[1] = ((img_hw[0] - 1) / 2) - 0.01
else:
line[1] = ((img_hw[0] - 1) / 2) + 0.01
else:
if p1 == 0:
line[3] = ((img_hw[0] - 1) / 2) - 0.01
else:
line[3] = ((img_hw[0] - 1) / 2) + 0.01
r1, p1 = PerspectiveDataset.coord2AngleValue(*line[0:2], img_hw)
r2, p2 = PerspectiveDataset.coord2AngleValue(*line[2:4], img_hw)
assert p1 == p2, "cline two endpoint is not in same updown part!"
                meanAngleValue = (r1 + r2) / 2  # plain arithmetic mean
if p1 == 0:
cUpPeaks.append(meanAngleValue)
cUpLengths.append(length_ratio.max() * 2)
else:
cDownPeaks.append(meanAngleValue)
cDownLengths.append(length_ratio.max() * 2)
return (xPeaks, yPeaks, cUpPeaks, cDownPeaks), (xLengths, yLengths, cUpLengths, cDownLengths)
def linesToPeaksNew(self, lines, img_hw):
return [np.array(item) for item in self.linesToPeaksNewCore([line[3:8] for line in lines], img_hw)[0]]
@staticmethod
def collate(batch):
def collateByKey(batch, key):
if key == "cor":
return [PerspectiveDataset.collate(d[key]) for d in batch]
else:
return PerspectiveDataset.collate([d[key] for d in batch])
elem = batch[0]
if isinstance(elem, dict):
return {key: collateByKey(batch, key) for key in elem}
elif isinstance(elem, list) or isinstance(elem, tuple):
return [PerspectiveDataset.collate(d) if isinstance(d[0], list) or isinstance(d[0], tuple)
else [default_collate([v]).squeeze(0) for v in d]
for d in batch]
return default_collate(batch)
def cor2xybound(cor):
''' Helper function to clip max/min stretch factor '''
corU = cor[0::2]
corB = cor[1::2]
zU = -50
u = panostretch.coorx2u(corU[:, 0])
vU = panostretch.coory2v(corU[:, 1])
vB = panostretch.coory2v(corB[:, 1])
x, y = panostretch.uv2xy(u, vU, z=zU)
c = np.sqrt(x ** 2 + y ** 2)
zB = c * np.tan(vB)
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
S = 3 / abs(zB.mean() - zU)
dx = [abs(xmin * S), abs(xmax * S)]
dy = [abs(ymin * S), abs(ymax * S)]
return min(dx), min(dy), max(dx), max(dy)
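# Note on use (as in PerspectiveDataset.getItem above): the returned
# (xmin, ymin, xmax, ymax) bound the projected room extent, so the random stretch
# factors kx, ky can be clipped to keep all corners inside the panorama.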
if __name__ == '__main__':
cfg = EasyDict()
data = EasyDict()
cfg.DATA = data
cfg.DATA.ROOT_DIR = "data/layoutnet_dataset"
dataset = PerspectiveDataset(cfg, "train")
d = dataset[0]
a = 1
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
seed = worker_info.seed
np.random.seed((seed + _) % 2 ** 32)
# # Avoid "cannot pickle KVReader object" error
# dataset.reader = KVReader(dataset.path, dataset.num_readers)
| 26,199 | 42.812709 | 118 | py |
DMH-Net | DMH-Net-main/train.py | import argparse
import os
# import ipdb
import sys
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from tqdm import trange
from config import cfg, cfg_from_yaml_file, cfg_from_list
from eval import valid
from misc.utils import save_model, load_trained_model
from model import ENCODER_RESNET, ENCODER_DENSENET, ENCODER_HOUGH, DMHNet
from perspective_dataset import PerspectiveDataset, worker_init_fn
GAMMA = 2
ALPHA_XY = 1.0
ALPHA_MATCH = 10.0
ALPHA_ANGLE = 1.0
ALPHA_HEIGHT = 1.0
def feed_forward(net, x, angle, up_bins, down_bins, edge, height, return_results=False):
up_bin256 = up_bins
down_bin256 = down_bins
x = x.to(device)
angle = angle.to(device)
up_bin256 = up_bin256.to(device)
down_bin256 = down_bin256.to(device)
edge = edge.to(device)
height = height.to(device)
losses = {}
angle_, up_xy_, down_xy_, edge_, height_, results_dict = net(x)
# Match loss
# Edge classification loss
loss_edg = F.binary_cross_entropy_with_logits(edge_, edge, reduction='none')
loss_edg[edge == 0.] *= 0.2
loss_edg = loss_edg.mean()
losses['edge'] = loss_edg
# Height loss
losses['height'] = ALPHA_HEIGHT * F.l1_loss(height_, height)
# X-Y classification loss
# losses['fuse_xy'] = ALPHA_XY * F.binary_cross_entropy_with_logits(fuse_xy_, up_bin256)
losses['up_xy'] = ALPHA_XY * F.binary_cross_entropy_with_logits(up_xy_, up_bin256)
losses['down_xy'] = ALPHA_XY * F.binary_cross_entropy_with_logits(down_xy_, down_bin256)
# Angle classification loss
loss_cor_ori = ALPHA_ANGLE * F.binary_cross_entropy_with_logits(angle_, angle)
# pt_cor = torch.exp(-loss_cor_ori)
losses['angle'] = loss_cor_ori
# ALPHA_ANGLE * ((1 - pt_cor)**GAMMA * loss_cor_ori).mean()
idx = torch.arange(256).view(1, 256, 1)
idx = idx.to(device)
up_reg = (idx * F.softmax(up_xy_, 2)).sum(2).squeeze(1)
down_reg = (idx * F.softmax(down_xy_, 2)).sum(2).squeeze(1)
ratio = up_reg / (down_reg + 1e-8)
losses['match'] = torch.abs(ratio - 1.).mean()
# Total loss
losses['total'] = losses['up_xy'] + losses['down_xy'] + losses['angle'] + losses['edge']
losses['total'] += losses['height']
losses['total'] += losses['match']
# For model selection
with torch.no_grad():
nobrain_baseline_xy = 1.
score_xy_up = 1 - (torch.sigmoid(up_xy_) - up_bin256).abs().mean() / nobrain_baseline_xy
score_xy_down = 1 - (torch.sigmoid(down_xy_) - down_bin256).abs().mean() / nobrain_baseline_xy
nobrain_baseline_angle = 1.
score_angle = 1 - (torch.sigmoid(angle_) - angle).abs().mean() / nobrain_baseline_angle
losses['score'] = (score_angle + score_xy_up + score_xy_down) / 3
results_dict['angle'] = angle_.detach()
results_dict['up_xy'] = up_xy_.detach()
results_dict['down_xy'] = down_xy_.detach()
if return_results:
return losses, results_dict
else:
return losses
def feature_viz(name, tb_writer):
def hook(model, input, output):
feat = output.detach()
feat_reshape = feat.view(-1, 1, feat.shape[2], feat.shape[3])
img = make_grid(feat_reshape, normalize=True)
tb_writer.add_image(name, img.cpu())
return hook
def visualize_item(x, y_cor, results_dict):
x = (x.numpy().transpose([1, 2, 0]) * 255).astype(np.uint8)
y_cor = y_cor.numpy()
gt_cor = np.zeros((30, 1024, 3), np.uint8)
gt_cor[:] = y_cor[0][None, :, None] * 255
img_pad = np.zeros((3, 1024, 3), np.uint8) + 255
cor_img = np.concatenate([gt_cor, img_pad, x], 0)
up_img = results_dict['up_img'].detach().cpu()[0]
up_img = (up_img.clone().numpy().transpose([1, 2, 0]) * 255).astype(np.uint8)
down_img = results_dict['down_img'].detach().cpu()[0]
down_img = (down_img.clone().numpy().transpose([1, 2, 0]) * 255).astype(np.uint8)
xy = torch.sigmoid(results_dict['up_xy']).detach().cpu()[0, 0].clone().numpy()
dir_x_up = np.concatenate([xy[:, 0][::-1], xy[:, 2]], 0)
dir_y_up = np.concatenate([xy[:, 1][::-1], xy[:, 3]], 0)
x_up_prob = np.zeros((30, 512, 3), np.uint8)
x_up_prob[:] = dir_x_up[None, :, None] * 255
y_up_prob = np.zeros((512, 30, 3), np.uint8)
y_up_prob[:] = dir_y_up[:, None, None] * 255
stich_up_canvas = np.zeros((30 + 3 + 512, 30 + 3 + 512, 3), np.uint8) + 255
stich_up_canvas[33:, 33:, :] = up_img
stich_up_canvas[33:, :30, :] = y_up_prob
stich_up_canvas[:30, 33:, :] = x_up_prob
xy = torch.sigmoid(results_dict['down_xy']).detach().cpu()[0, 0].clone().numpy()
dir_x_down = np.concatenate([xy[:, 0][::-1], xy[:, 2]], 0)
dir_y_down = np.concatenate([xy[:, 1][::-1], xy[:, 3]], 0)
x_down_prob = np.zeros((30, 512, 3), np.uint8)
x_down_prob[:] = dir_x_down[None, :, None] * 255
y_down_prob = np.zeros((512, 30, 3), np.uint8)
y_down_prob[:] = dir_y_down[:, None, None] * 255
stich_down_canvas = np.zeros((30 + 3 + 512, 30 + 3 + 512, 3), np.uint8) + 255
stich_down_canvas[33:, 33:, :] = down_img
stich_down_canvas[33:, :30, :] = y_down_prob
stich_down_canvas[:30, 33:, :] = x_down_prob
return cor_img, stich_up_canvas, stich_down_canvas
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cfg_file', '-c', type=str, required=True, help='specify the config for training')
parser.add_argument('--id', required=True, help='experiment id to name checkpoints and logs')
parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints')
parser.add_argument('--logs', default='./logs', help='folder to output logs')
parser.add_argument('--pth', default=None, help='path to load a saved checkpoint (for finetuning)')
# Model related
parser.add_argument('--backbone',
default='drn38',
choices=ENCODER_RESNET + ENCODER_DENSENET + ENCODER_HOUGH,
help='backbone of the network')
parser.add_argument('--no_rnn', action='store_true', help='whether to remove rnn or not')
# Dataset related arguments
# TODO: the original code swapped the test and training sets and had no validation set.
# The new code uses the original training and test sets as-is.
parser.add_argument('--train_root_dir',
default='data/layoutnet_dataset/test',
help='root directory of the training dataset; '
'should contain img and label_cor subdirectories')
parser.add_argument('--valid_root_dir',
default='data/layoutnet_dataset/train',
help='root directory of the validation dataset; '
'should contain img and label_cor subdirectories')
parser.add_argument('--no_flip', action='store_true', help='disable left-right flip augmentation')
parser.add_argument('--no_rotate', action='store_true', help='disable horizontal rotate augmentation')
parser.add_argument('--no_gamma', action='store_true', help='disable gamma augmentation')
parser.add_argument('--no_erase', action='store_true', help='disable random erasing augmentation')
parser.add_argument('--no_noise', action='store_true', help='disable random noise augmentation')
parser.add_argument('--no_pano_stretch', action='store_true', help='disable pano stretch')
parser.add_argument('--num_workers', '-j', type=int, help='number of workers for the dataloaders')
# optimization related arguments
parser.add_argument('--freeze_earlier_blocks', default=-1, type=int)
parser.add_argument('--batch_size', '-b', type=int, help='batch size')
# parser.add_argument('--batch_size_valid', default=2, type=int, help='validation mini-batch size')
parser.add_argument('--epochs', type=int, help='epochs to train')
parser.add_argument('--optim', default='Adam', help='optimizer to use; only SGD and Adam are supported')
parser.add_argument('--lr', type=float, help='learning rate')
parser.add_argument('--lr_per_sample', type=float, help='learning rate per sample')
parser.add_argument('--lr_pow', default=0.9, type=float, help='power in poly to drop LR')
parser.add_argument('--warmup_lr', default=1e-6, type=float, help='starting learning rate for warm up')
parser.add_argument('--warmup_epochs', default=0, type=int, help='number of warmup epochs')
parser.add_argument('--beta1', default=0.9, type=float, help='momentum for sgd, beta1 for adam')
parser.add_argument('--weight_decay', default=0, type=float, help='factor for L2 regularization')
parser.add_argument('--valid_visu', default=1, type=int, help='how many batches to be visualized when eval')
# Misc arguments
parser.add_argument('--no_cuda', action='store_true', help='disable cuda')
parser.add_argument('--seed', default=594277, type=int, help='manual seed')
parser.add_argument('--disp_iter', type=int, default=1, help='iteration frequency at which to display progress')
parser.add_argument('--save_every', type=int, default=25, help='epoch frequency at which to save the state_dict')
parser.add_argument('--no_multigpus', action='store_true', help='disable data parallel')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
cfg.TAG = Path(args.cfg_file).stem
cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
if args.batch_size is not None:
cfg.OPTIM.BATCH_SIZE = args.batch_size
if args.lr is not None or args.lr_per_sample is not None:
if args.lr is not None and args.lr_per_sample is not None:
assert False, "不能同时指定--lr和--lr_per_sample!"
if args.lr is not None:
cfg.OPTIM.LR = args.lr
if args.lr_per_sample is not None:
cfg.OPTIM.LR = args.lr_per_sample * cfg.OPTIM.BATCH_SIZE
if args.epochs is not None:
cfg.OPTIM.MAX_EPOCH = args.epochs
if args.num_workers is None:
args.num_workers = min(max(8, cfg.OPTIM.BATCH_SIZE), os.cpu_count()) if not sys.gettrace() else 0
device = torch.device('cpu' if args.no_cuda else 'cuda')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
os.makedirs(os.path.join(args.ckpt, args.id), exist_ok=True)
# Create dataloader
dataset_train = PerspectiveDataset(cfg, "train", train_mode=True)
dataset_train_size = len(dataset_train)
print("num_workers: " + str(args.num_workers))
print("batch_size: " + str(cfg.OPTIM.BATCH_SIZE))
print("train_set_size: " + str(dataset_train_size))
loader_train = DataLoader(
dataset_train,
cfg.OPTIM.BATCH_SIZE,
collate_fn=dataset_train.collate,
shuffle=True,
drop_last=False,
num_workers=args.num_workers,
pin_memory=not args.no_cuda,
worker_init_fn=worker_init_fn)
if args.valid_root_dir:
dataset_valid = PerspectiveDataset(cfg, "valid")
loader_valid = DataLoader(dataset_valid,
min(cfg.OPTIM.BATCH_SIZE, 4),
collate_fn=dataset_valid.collate,
shuffle=False,
drop_last=False,
num_workers=args.num_workers,
pin_memory=not args.no_cuda,
worker_init_fn=worker_init_fn)
# Create model
if args.pth is not None:
print('Finetune checkpoint given.')
print('Ignoring --backbone and --no_rnn.')
net = load_trained_model(DMHNet, args.pth, cfg, cfg.MODEL.get("BACKBONE", {}).get("NAME", "drn38"),
not args.no_rnn).to(device)
else:
net = DMHNet(cfg, cfg.MODEL.get("BACKBONE", {}).get("NAME", "drn38"), not args.no_rnn).to(device)
if not args.no_multigpus:
net = nn.DataParallel(net) # multi-GPU
# Create optimizer
print("LR {:f}".format(cfg.OPTIM.LR))
if cfg.OPTIM.TYPE == 'SGD':
optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
lr=cfg.OPTIM.LR,
momentum=args.beta1,
weight_decay=args.weight_decay)
elif cfg.OPTIM.TYPE == 'Adam':
optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),
lr=cfg.OPTIM.LR,
betas=(args.beta1, 0.999),
weight_decay=args.weight_decay)
else:
raise NotImplementedError()
# Create tensorboard for monitoring training
tb_path = os.path.join(args.logs, args.id)
os.makedirs(tb_path, exist_ok=True)
tb_writer = SummaryWriter(log_dir=tb_path)
# Init variable
args.warmup_iters = args.warmup_epochs * len(loader_train)
# args.max_iters = args.epochs * len(loader_train)
# args.running_lr = args.warmup_lr if args.warmup_epochs > 0 else args.lr
milestones = cfg.OPTIM.get("SCHEDULER", {}).get("MILESTONES", [50, 100])
gamma = cfg.OPTIM.get("SCHEDULER", {}).get("GAMMA", 0.3)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma)
tb_writer.add_text("cfg", str(cfg))
tb_writer.add_text("args", str(args))
tb_writer.add_text("gpuid", os.environ.get("CUDA_VISIBLE_DEVICES", "None"))
# Init bin mask
# anglex = np.linspace(-256, 255, 512)
# angley = np.linspace(256, -255, 512)
# xv, yv = np.meshgrid(anglex, angley)
# # idx is the mapping table
# idx = (np.rad2deg(np.arctan2(xv, yv)) + 180 - 1).astype(int)
# binary_mask = np.zeros((512, 512, 360))
# for i in range(360):
# binary_mask[np.where(idx == i)[0], np.where(idx == i)[1], i] = 1
# binary_mask = torch.tensor(binary_mask, dtype=torch.float32)
best_valid_score = 0 # model selection: based on 3D IoU
# Start training
for ith_epoch in trange(1, cfg.OPTIM.MAX_EPOCH + 1, desc='Epoch', unit='ep'):
# Train phase
net.train()
# torch.cuda.empty_cache()
iterator_train = iter(loader_train)
cur_sample_count = 0
for _ in trange(len(loader_train), desc='Train ep%s' % ith_epoch, position=1):
# Set learning rate
# adjust_learning_rate(optimizer, args)
input = next(iterator_train)
for k in input:
if isinstance(input[k], torch.Tensor):
input[k] = input[k].to(device)
cur_sample_count += len(input["p_imgs"])
tb_total_sample_count = (ith_epoch - 1) * dataset_train_size + cur_sample_count
losses, results_dict = net(input)
for k, v in losses.items():
k = 'train/%s' % k
tb_writer.add_scalar(k, v.item(), tb_total_sample_count)
loss = losses['total']
# backprop
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(net.parameters(), 1.0, norm_type=2)
optimizer.step()
tb_writer.add_scalar('train/lr', optimizer.param_groups[0]["lr"], ith_epoch)
# Valid phase
valid_loss, imgs, metrics = valid(cfg, net, loader_valid, dataset_valid, device, args.valid_visu, valid_epoch=ith_epoch)
if cfg.get("TEST_METRIC", False):
dataset_test = PerspectiveDataset(cfg, "test")
loader_test = DataLoader(dataset_test,
min(cfg.OPTIM.BATCH_SIZE, 4),
collate_fn=dataset_test.collate,
shuffle=False,
drop_last=False,
num_workers=args.num_workers,
pin_memory=not args.no_cuda,
worker_init_fn=worker_init_fn)
test_loss, test_imgs, test_metrics = valid(cfg, net, loader_test, dataset_test, device, 0)
for k, v in test_metrics.items():
print("{:s} {:f}".format(k, v))
tb_writer.add_scalar('testmetric/%s' % k, v, ith_epoch)
for k, v in imgs.items():
tb_writer.add_image('valid/{:s}'.format(k), v, ith_epoch, dataformats="HWC")
for k, v in valid_loss.items():
print("{:s} {:f}".format(k, v))
tb_writer.add_scalar('valid/%s' % k, v, ith_epoch)
for k, v in metrics.items():
print("{:s} {:f}".format(k, v))
tb_writer.add_scalar('metric/%s' % k, v, ith_epoch)
# Save best validation loss model
if "3DIoU" in metrics:
valid_score = metrics["3DIoU"]
else:
valid_score = 100 - valid_loss["total"] # used for model selection when training without post-processing
if valid_score >= best_valid_score:
best_valid_score = valid_score
print("save BEST VALID ckpt " + str(ith_epoch))
save_model(net, os.path.join(args.ckpt, args.id, 'best_valid.pth'), args)
# Periodically save model
if ith_epoch % args.save_every == 0:
print("save ckpt " + str(ith_epoch))
save_model(net, os.path.join(args.ckpt, args.id, 'epoch_%d.pth' % ith_epoch), args)
scheduler.step()
if cfg.get("FINAL_EVAL", False):
print("现在开始finalEval!")
commandLine = "python eval.py --cfg_file {:s} --ckpt ckpt/{:s}/best_valid.pth --print_detail --output_file".format(args.cfg_file, args.id)
if cfg.get("FINAL_EVAL_METHOD"):
commandLine += " --set POST_PROCESS.METHOD {:s}".format(cfg.FINAL_EVAL_METHOD)
print("要执行的命令行 " + commandLine)
os.system(commandLine)
| 18,050 | 44.583333 | 146 | py |
DMH-Net | DMH-Net-main/e2pconvert_torch/convertExUtils.py | from functools import reduce
import numpy as np
import torch
from py360convert import rotation_matrix
from e2pconvert_torch import torch360convert
def rotationMatrix(u, v, in_rot):
Rx = rotation_matrix(v, [1, 0, 0])
Ry = rotation_matrix(u, [0, 1, 0])
Ri = rotation_matrix(in_rot, np.array([0, 0, 1.0]).dot(Rx).dot(Ry))
return Rx.dot(Ry).dot(Ri)
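# Illustrative sketch (values assumed): the composed matrix rotates world points
# into the view frame; because the result is orthonormal, applying R.T undoes it.
#   R = rotationMatrix(np.deg2rad(30), np.deg2rad(10), 0.0)
#   p = np.array([[0.0, 0.0, 1.0]])
#   assert np.allclose(p @ R @ R.T, p)   # round trip via the transpose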
def unitxyzToPerspectiveCoord(input, fov_deg, u_deg, v_deg, out_hw, in_rot_deg=0):
"""
把unitxyz转化为单个perspective image上的xy像素坐标。
:param input: (n, 3) n个点、每个点三维坐标(unitxyz)
:param fov_deg: 同py360convert.e2p函数
:param u_deg: 同py360convert.e2p函数
:param v_deg: 同py360convert.e2p函数
:param out_hw: 同py360convert.e2p函数
:param in_rot_deg: 同py360convert.e2p函数
:return: 元组。第一个元素是(n, 2) n个点、每个点在图像上的两维坐标(以像素为单位);第二个元素是(n, 3),每个点在透视坐标系中的坐标
"""
assert len(input.shape) == 2 and input.shape[1] == 3
h_fov, v_fov = fov_deg[0] * np.pi / 180, fov_deg[1] * np.pi / 180
in_rot = in_rot_deg * np.pi / 180
u = -u_deg * np.pi / 180
v = v_deg * np.pi / 180
rotMat = rotationMatrix(u, v, in_rot)
rotMat = input.new_tensor(rotMat)
imgXyz = input.matmul(rotMat.T) # for a rotation matrix, the inverse equals the transpose
imgXyz = imgXyz / torch.abs(imgXyz[:, 2:]) # normalize so that z becomes 1
# compute the visible range from x, y, and the FOV
x_max = np.tan(h_fov / 2)
y_max = np.tan(v_fov / 2)
# linearly map the visible range onto the pixel range
normed_pos = imgXyz[:, :2] / torch.tensor([x_max, -y_max], dtype=imgXyz.dtype, device=imgXyz.device) / 2 + 0.5
pos = normed_pos * torch.tensor([out_hw[1] - 1, out_hw[0] - 1], dtype=imgXyz.dtype, device=imgXyz.device)
return pos, imgXyz
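# Illustrative usage (assumed values): a point straight ahead on the unit sphere
# projects to the center of a 90-degree-FOV, 512x512 front view.
#   pts = torch.tensor([[0.0, 0.0, 1.0]])
#   pos, imgXyz = unitxyzToPerspectiveCoord(pts, (90, 90), 0, 0, (512, 512))
#   # pos ~= [[255.5, 255.5]] (the image center); imgXyz ~= [[0., 0., 1.]]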
def coordE2P(input_pts, img, fov_deg, u_deg, v_deg, out_hw, in_rot_deg=0, isInputUv=False):
"""
把equirect下的坐标转换为perspective下的坐标
:param input: (n, 3) n个点、每个点三维坐标(unitxyz)
:param img: 全景equirect图像
:param fov_deg: 同py360convert.e2p函数
:param u_deg: 同py360convert.e2p函数
:param v_deg: 同py360convert.e2p函数
:param out_hw: 同py360convert.e2p函数
:param in_rot_deg: 同py360convert.e2p函数
:return: 元组。第一个元素是(n, 2) n个点、每个点两维坐标;第二个元素是整数类型(n) 表示每个点的类型:
0表示点不在180度视角内(在相机后面),xy值无意义;1表示点在180度视角内、但不在图片内,2表示点在图片内。
"""
if not isInputUv: uv = torch360convert.coor2uv(input_pts, img.shape[1], img.shape[2])
else:
uv = input_pts
xyz = torch360convert.uv2unitxyz(uv)
result, imgXyz = unitxyzToPerspectiveCoord(xyz, fov_deg, u_deg, v_deg, out_hw, in_rot_deg)
type = (imgXyz[:, 2] > 0).to(torch.int8)
inimage_mask = reduce(torch.logical_and, [type, result[:, 0] >= 0, result[:, 0] <= out_hw[1] - 1, result[:, 1] >= 0,
result[:, 1] <= out_hw[0] - 1])
type[inimage_mask] = 2
return result, type, imgXyz
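# Illustrative usage (assumed shapes): map an equirectangular pixel into the
# front view and read off its visibility class.
#   e_img = torch.zeros(3, 512, 1024)
#   pts = torch.tensor([[512.0, 256.0]])   # near the panorama center
#   pos, vis, _ = coordE2P(pts, e_img, (90, 90), 0, 0, (512, 512))
#   # vis == 2: the point lands inside the perspective image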
| 2,723 | 35.810811 | 120 | py |
DMH-Net | DMH-Net-main/e2pconvert_torch/e2plabelconvert.py | import numpy as np
import torch
from .convertExUtils import coordE2P
def linesPostProcess(lines, img_hw, is_updown_view):
"""
Process the lines: filter out lines that are not visible and normalize the line start/end points.
:param lines: (k, 7), the k lines in the image. Each line is seven numbers: the first two are the endpoint
indices into points, then the line type (0: vertical wall line, 1: ceiling line, 2: floor line), then the x, y
coordinates of the start point, then the x, y coordinates of the end point
:param img_hw: (2), the width and height of the image
:return (m, 8) the m visible lines in the image. The first 7 numbers of each line are as above; the 8th gives
the line direction in the view: 0 vertical, 1 horizontal, 2 passing through the center
"""
# !!! Per the derivation of Oct 12: for points produced by py360convert's e2p/e2c transforms, the 3D range -1~1
# should map to pixels 0~h-1 in the 2D image, not 0~h!
# Taking h = 512 as an example, 511.5 in 2D image coordinates is meaningless (not visible in the current plane)!
# Hence this change! Although analysis suggests this small bug would not materially affect the results
# (at most one pixel of error), it is fixed anyway. The same fix was applied to the four functions
# unitxyzToPerspectiveCoord, coordE2P, generateOnePerspectiveLabel, and lineCoordToRatio.
img_hw = [img_hw[0] - 1, img_hw[1] - 1]
def processPoint(point, k):
xInRange = False
if point[0] < 0:
y = k * (0 - point[0]) + point[1]
if 0 <= y <= img_hw[0]:
return torch.stack([y.new_tensor(0), y])
elif point[0] > img_hw[1]:
y = k * (img_hw[1] - point[0]) + point[1]
if 0 <= y <= img_hw[0]:
return torch.stack([y.new_tensor(img_hw[1]), y])
else:
xInRange = True
if point[1] < 0:
x = point[0]
if not torch.isinf(k): x = x + (0 - point[1]) / k # otherwise, when k is inf, the forward pass is fine but the backward pass yields grad=nan
if 0 <= x <= img_hw[1]:
return torch.stack([x, x.new_tensor(0)])
elif point[1] > img_hw[0]:
x = point[0]
if not torch.isinf(k): x = x + (img_hw[0] - point[1]) / k
if 0 <= x <= img_hw[1]:
return torch.stack([x, x.new_tensor(img_hw[0])])
else:
if xInRange:
return point
return None
result = []
for line in lines:
k = (line[6] - line[4]) / (line[5] - line[3])
p1Res = processPoint(line[3:5], k)
p2Res = processPoint(line[5:7], k)
if p1Res is not None and p2Res is not None:
if line[2] == 0:
direct = 2 if is_updown_view else 0
else:
if is_updown_view:
direct = 1 if -1 <= k <= 1 else 0
else:
yLR = (k * (0 - p1Res[0]) + p1Res[1], k * (img_hw[1] - p1Res[0]) + p1Res[1])
if (yLR[0] < img_hw[0] / 2 and yLR[1] > img_hw[0] / 2) or (
yLR[1] < img_hw[0] / 2 and yLR[0] > img_hw[0] / 2):
direct = 2
else:
direct = 1
result.append(torch.cat((line[0:3], p1Res, p2Res, line.new_tensor([direct]))))
return result
def generateOnePerspectiveLabel(e_img, e_label, fov_deg, u_deg, v_deg, out_hw,
in_rot_deg=0, isInputUv=False):
"""
Generate label information for the given perspective parameters; the result can also be visualized.
:param e_img: the equirectangular panorama image
:param e_label: (n, 2) ground-truth corner coordinates in the equirectangular frame
:param fov_deg: same as in py360convert.e2p
:param u_deg: same as in py360convert.e2p
:param v_deg: same as in py360convert.e2p
:param out_hw: same as in py360convert.e2p
:param in_rot_deg: same as in py360convert.e2p
:return: a dict with the point and line-segment information of the image, containing the fields "points",
"point_types" and "lines".
points: (n, 2), the x, y coordinates of the points in the image (floats)
point_types: integer (n), the type of each point in points: 0 means outside the 180° view (behind the camera),
xy values meaningless; 1 means within the 180° view but outside the image; 2 means inside the image.
lines: (k, 8), the k lines in the image. Each line: the first two numbers are endpoint indices into points, then
the line type (0: vertical wall line, 1: ceiling line, 2: floor line), then the start-point x, y, then the
end-point x, y, then the line direction in the view (0 vertical, 1 horizontal, 2 passing through the center)
"""
points, point_types, imgXyz = coordE2P(e_label, e_img, fov_deg, u_deg, v_deg, out_hw, in_rot_deg, isInputUv=isInputUv)
lines = []
corner_count = e_label.shape[0]
# Define the three kinds of edges. LINES holds 12 entries for the 12 edges of the cuboid.
# The first two elements of each entry are the indices of the labeled points, in order.
# The third element is the line type: 0 vertical wall line, 1 ceiling line, 2 floor line.
LINES = []
for i in range(0, corner_count, 2):
LINES.append([i, i + 1, 0])
LINES.append([i, (i + 2) % corner_count, 1])
LINES.append([i + 1, (i + 3) % corner_count, 2])
for l in LINES:
if point_types[l[0]] > 0 and point_types[l[1]] > 0:
# both points are within the 180° view in front of the camera
lines.append(torch.cat([points.new_tensor(l), points[l[0]], points[l[1]]]))
elif point_types[l[0]] == 2 or point_types[l[1]] == 2:
# only one point is inside the image; the other is behind the camera
# find a suitable point on the connecting line whose projection lies on the image border
line = torch.cat([points.new_tensor(l), points[l[0]], points[l[1]]])
if point_types[l[0]] == 2:
p1 = imgXyz[l[0]]
p2 = imgXyz[l[1]]
toFill = 1
else:
p1 = imgXyz[l[1]]
p2 = imgXyz[l[0]]
toFill = 0
tantheta = np.tan((180 - fov_deg[0]) / 2 * np.pi / 180)
# pc: the intersection of the p1-p2 line with the FOV plane. k should be guaranteed to lie in 0.5~1.
# pc's x should be guaranteed to be very close to the border (1 or -1)?
k1 = (p2[0] * tantheta - p2[2]) / ((p1[2] - p2[2]) - ((p1[0] - p2[0]) * tantheta))
k2 = (p2[0] * -tantheta - p2[2]) / ((p1[2] - p2[2]) - ((p1[0] - p2[0]) * -tantheta))
k = k1 if 0.5 <= k1 <= 1 else k2
pc = p1 * k + p2 * (1 - k)
assert 0.5 <= k <= 1 and pc[2] > 0, "k error"
pc = pc / pc[2]
assert -0.01 <= (abs(pc[0]) - 1) <= 0.01, "pc error"
# map pc's coordinates onto the image
h_fov, v_fov = fov_deg[0] * np.pi / 180, fov_deg[1] * np.pi / 180
x_max = np.tan(h_fov / 2)
y_max = np.tan(v_fov / 2)
normed_pos = pc[:2] / torch.tensor([x_max, -y_max], dtype=imgXyz.dtype, device=imgXyz.device) / 2 + 0.5
pos = normed_pos * torch.tensor([out_hw[1] - 1, out_hw[0] - 1], dtype=imgXyz.dtype, device=imgXyz.device)
if toFill == 0:
line = torch.cat([line[0:3], pos, line[5:7]])
elif toFill == 1:
line = torch.cat([line[0:5], pos])
# line[2 * toFill + 3:2 * toFill + 5] = pos # fill in the image coordinates of pc
lines.append(line)
lines = linesPostProcess(lines, out_hw, v_deg != 0) # remove invisible lines
result = {
"points": points,
"point_types": point_types,
"lines": lines
}
return result
| 6,202 | 41.197279 | 132 | py |
DMH-Net | DMH-Net-main/e2pconvert_torch/torch360convert.py | import numpy as np
import torch
def coor2uv(coorxy, h, w):
coor_x, coor_y = coorxy[:, 0:1], coorxy[:, 1:2]
u = ((coor_x + 0.5) / w - 0.5) * 2 * np.pi
v = -((coor_y + 0.5) / h - 0.5) * np.pi
return torch.cat([u, v], -1)
def uv2unitxyz(uv):
u, v = uv[:, 0:1], uv[:, 1:2]
y = torch.sin(v)
c = torch.cos(v)
x = c * torch.sin(u)
z = c * torch.cos(u)
return torch.cat([x, y, z], -1)
def uv2coor(uv, h, w):
'''
uv: ndarray in shape of [..., 2]
h: int, height of the equirectangular image
w: int, width of the equirectangular image
'''
u, v = uv[:, 0:1], uv[:, 1:2]
coor_x = (u / (2 * np.pi) + 0.5) * w - 0.5
coor_y = (-v / np.pi + 0.5) * h - 0.5
return torch.cat([coor_x, coor_y], -1)
def xyz2uv(xyz):
'''
xyz: ndarray in shape of [..., 3]
'''
x, y, z = xyz[:, 0:1], xyz[:, 1:2], xyz[:, 2:3]
u = torch.atan2(x, z)
c = torch.sqrt(x ** 2 + z ** 2)
v = torch.atan2(y, c)
return torch.cat([u, v], -1)
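# Round-trip sketch (illustrative values): pixel -> uv -> unit xyz -> uv -> pixel
# should reproduce the input up to floating-point error on a 512x1024 panorama.
#   xy = torch.tensor([[100.0, 200.0]])
#   uv = coor2uv(xy, 512, 1024)
#   assert torch.allclose(uv2coor(xyz2uv(uv2unitxyz(uv)), 512, 1024), xy, atol=1e-3)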
| 1,010 | 20.978261 | 51 | py |
DMH-Net | DMH-Net-main/postprocess/LayoutNetv2.py | import numpy as np
import scipy.signal
import torch
from scipy.ndimage.filters import maximum_filter
from torch import optim
import postprocess.LayoutNet_post_proc2 as post_proc
from scipy.ndimage import convolve, map_coordinates
from shapely.geometry import Polygon
def LayoutNetv2PostProcessMain(cor_img: np.ndarray, edg_img: np.ndarray) -> np.ndarray:
"""
:param cor_img ndarray<512,1024>
:param edg_img ndarray<512,1024,3>
"""
# general layout, top view
cor_ = cor_img.sum(0)
cor_ = (cor_ - np.amin(cor_)) / np.ptp(cor_)
min_v = 0.25 # 0.05
xs_ = find_N_peaks(cor_, r=26, min_v=min_v, N=None)[0]
# special case: too few corners
if xs_.shape[0] < 4:
xs_ = find_N_peaks(cor_, r=26, min_v=0.05, N=None)[0]
if xs_.shape[0] < 4:
xs_ = find_N_peaks(cor_, r=26, min_v=0, N=None)[0]
# get ceil and floor line
ceil_img = edg_img[:, :, 1]
floor_img = edg_img[:, :, 2]
ceil_idx = np.argmax(ceil_img, axis=0)
floor_idx = np.argmax(floor_img, axis=0)
# Init floor/ceil plane
z0 = 50
force_cuboid = False
_, z1 = post_proc.np_refine_by_fix_z(ceil_idx, floor_idx, z0)
# Generate general wall-wall
cor, xy_cor = post_proc.gen_ww(xs_, ceil_idx, z0, tol=abs(0.16 * z1 / 1.6), force_cuboid=force_cuboid)
if not force_cuboid:
# Check valid (for fear self-intersection)
xy2d = np.zeros((len(xy_cor), 2), np.float32)
for i in range(len(xy_cor)):
xy2d[i, xy_cor[i]['type']] = xy_cor[i]['val']
xy2d[i, xy_cor[i - 1]['type']] = xy_cor[i - 1]['val']
if not Polygon(xy2d).is_valid:
# actually it's not force cuboid, just assume all corners are visible, go back to original LayoutNet initialization
# print(
# 'Fail to generate valid general layout!! '
# 'Generate cuboid as fallback.',
# file=sys.stderr)
cor_id = get_ini_cor(cor_img, 21, 3)
force_cuboid = True
if not force_cuboid:
# Expand with btn coory
cor = np.hstack([cor, post_proc.infer_coory(cor[:, 1], z1 - z0, z0)[:, None]])
# Collect corner position in equirectangular
cor_id = np.zeros((len(cor) * 2, 2), np.float32)
for j in range(len(cor)):
cor_id[j * 2] = cor[j, 0], cor[j, 1]
cor_id[j * 2 + 1] = cor[j, 0], cor[j, 2]
# refinement
cor_id = optimize_cor_id(cor_id, edg_img, cor_img, num_iters=100, verbose=False)
return cor_id
def find_N_peaks(signal, r=29, min_v=0.05, N=None):
max_v = maximum_filter(signal, size=r, mode='wrap')
pk_loc = np.where(max_v == signal)[0]
pk_loc = pk_loc[signal[pk_loc] > min_v]
# check for odd case, remove one
if (pk_loc.shape[0]%2)!=0:
pk_id = np.argsort(-signal[pk_loc])
pk_loc = pk_loc[pk_id[:-1]]
pk_loc = np.sort(pk_loc)
if N is not None:
order = np.argsort(-signal[pk_loc])
pk_loc = pk_loc[order[:N]]
pk_loc = pk_loc[np.argsort(pk_loc)]
return pk_loc, signal[pk_loc]
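# Behaviour sketch (illustrative signal): the wrap-mode maximum filter keeps the
# local maxima above min_v, and an odd peak count is made even by dropping the
# weakest peak.
#   sig = np.zeros(1024); sig[[100, 500, 900]] = [0.9, 0.3, 0.8]
#   locs, vals = find_N_peaks(sig, r=26, min_v=0.05)
#   # locs -> [100, 900]: the weakest of the three peaks (at 500) is dropped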
def get_ini_cor(cor_img, d1=21, d2=3):
cor = convolve(cor_img, np.ones((d1, d1)), mode='constant', cval=0.0)
cor_id = []
cor_ = cor_img.sum(0)
cor_ = (cor_ - np.amin(cor_)) / np.ptp(cor_)
min_v = 0.25 # 0.05
xs_ = find_N_peaks(cor_, r=26, min_v=min_v, N=None)[0]
# special case: too few corners
if xs_.shape[0] < 4:
xs_ = find_N_peaks(cor_, r=26, min_v=0.05, N=None)[0]
if xs_.shape[0] < 4:
xs_ = find_N_peaks(cor_, r=26, min_v=0, N=None)[0]
X_loc = xs_
for x in X_loc:
x_ = int(np.round(x))
V_signal = cor[:, max(0, x_ - d2):x_ + d2 + 1].sum(1)
y1, y2 = find_N_peaks_conv(V_signal, prominence=None,
distance=20, N=2)[0]
cor_id.append((x, y1))
cor_id.append((x, y2))
cor_id = np.array(cor_id, np.float64)
return cor_id
def find_N_peaks_conv(signal, prominence, distance, N=4):
locs, _ = scipy.signal.find_peaks(signal,
prominence=prominence,
distance=distance)
pks = signal[locs]
pk_id = np.argsort(-pks)
pk_loc = locs[pk_id[:min(N, len(pks))]]
pk_loc = np.sort(pk_loc)
return pk_loc, signal[pk_loc]
def optimize_cor_id(cor_id, scoreedg, scorecor, num_iters=100, verbose=False):
assert scoreedg.shape == (512, 1024, 3)
assert scorecor.shape == (512, 1024)
Z = -1
ceil_cor_id = cor_id[0::2]
floor_cor_id = cor_id[1::2]
ceil_cor_id, ceil_cor_id_xy = constraint_cor_id_same_z(ceil_cor_id, scorecor, Z)
# ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(4).reshape(-1, 1) + Z])
ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(ceil_cor_id.shape[0]).reshape(-1, 1) + Z])
# TODO: revise here to general layout
# pc = (ceil_cor_id_xy[0] + ceil_cor_id_xy[2]) / 2
# print(ceil_cor_id_xy)
if abs(ceil_cor_id_xy[0, 0] - ceil_cor_id_xy[1, 0]) > abs(ceil_cor_id_xy[0, 1] - ceil_cor_id_xy[1, 1]):
ceil_cor_id_xy = np.concatenate((ceil_cor_id_xy[1:, :], ceil_cor_id_xy[:1, :]), axis=0)
# print(cor_id)
# print(ceil_cor_id_xy)
pc = np.mean(ceil_cor_id_xy, axis=0)
pc_vec = ceil_cor_id_xy[0] - pc
pc_theta = vecang(pc_vec, ceil_cor_id_xy[1] - pc)
pc_height = fit_avg_z(floor_cor_id, ceil_cor_id_xy, scorecor)
if ceil_cor_id_xy.shape[0] > 4:
pc_theta = np.array([ceil_cor_id_xy[1, 1]])
for c_num in range(2, ceil_cor_id_xy.shape[0] - 1):
if (c_num % 2) == 0:
pc_theta = np.append(pc_theta, ceil_cor_id_xy[c_num, 0])
else:
pc_theta = np.append(pc_theta, ceil_cor_id_xy[c_num, 1])
with torch.enable_grad():
scoreedg = torch.FloatTensor(scoreedg)
scorecor = torch.FloatTensor(scorecor)
pc = torch.FloatTensor(pc)
pc_vec = torch.FloatTensor(pc_vec)
pc_theta = torch.FloatTensor([pc_theta])
pc_height = torch.FloatTensor([pc_height])
pc.requires_grad = True
pc_vec.requires_grad = True
pc_theta.requires_grad = True
pc_height.requires_grad = True
# print(pc_theta)
# time.sleep(2)
# return cor_id
optimizer = optim.SGD([
pc, pc_vec, pc_theta, pc_height
], lr=1e-3, momentum=0.9)
best = {'score': 1e9}
for i_step in range(num_iters):
i = i_step if verbose else None
optimizer.zero_grad()
score = project2sphere_score(pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor, i)
if score.item() < best['score']:
best['score'] = score.item()
best['pc'] = pc.clone()
best['pc_vec'] = pc_vec.clone()
best['pc_theta'] = pc_theta.clone()
best['pc_height'] = pc_height.clone()
score.backward()
optimizer.step()
pc = best['pc']
pc_vec = best['pc_vec']
pc_theta = best['pc_theta']
pc_height = best['pc_height']
opt_cor_id = pc2cor_id(pc, pc_vec, pc_theta, pc_height).detach().numpy()
split_num = int(opt_cor_id.shape[0] // 2)
opt_cor_id = np.stack([opt_cor_id[:split_num], opt_cor_id[split_num:]], axis=1).reshape(split_num * 2, 2)
# print(opt_cor_id)
# print(cor_id)
# time.sleep(500)
return opt_cor_id
def constraint_cor_id_same_z(cor_id, cor_img, z=-1):
# Convert to uv space
cor_id_u = ((cor_id[:, 0] + 0.5) / cor_img.shape[1] - 0.5) * 2 * np.pi
cor_id_v = ((cor_id[:, 1] + 0.5) / cor_img.shape[0] - 0.5) * np.pi
# Convert to xyz space (z=-1)
cor_id_c = z / np.tan(cor_id_v)
cor_id_xy = np.stack([
cor_id_c * np.cos(cor_id_u),
cor_id_c * np.sin(cor_id_u),
], axis=0).T
# # Fix 2 diagonal corner, move the others
# cor_id_score = map_coordinates(cor_img, [cor_id[:, 1], cor_id[:, 0]])
# if cor_id_score[0::2].sum() > cor_id_score[1::2].sum():
# idx0, idx1 = 0, 1
# else:
# idx0, idx1 = 1, 0
# pc = cor_id_xy[idx0::2].mean(0, keepdims=True)
# radius2 = np.sqrt(((cor_id_xy[idx0::2] - pc) ** 2).sum(1)).mean()
# d = cor_id_xy[idx1::2] - pc
# d1 = d[0]
# d2 = d[1]
# theta1 = (np.arctan2(d1[1], d1[0]) + 2 * np.pi) % (2 * np.pi)
# theta2 = (np.arctan2(d2[1], d2[0]) + 2 * np.pi) % (2 * np.pi)
# theta2 = theta2 - np.pi
# theta2 = (theta2 + 2 * np.pi) % (2 * np.pi)
# theta = (theta1 + theta2) / 2
# d[0] = (radius2 * np.cos(theta), radius2 * np.sin(theta))
# theta = theta - np.pi
# d[1] = (radius2 * np.cos(theta), radius2 * np.sin(theta))
# cor_id_xy[idx1::2] = pc + d
# Convert refined xyz back to uv space
cor_id_uv = np.stack([
np.arctan2(cor_id_xy[:, 1], cor_id_xy[:, 0]),
np.arctan2(z, np.sqrt((cor_id_xy ** 2).sum(1))),
], axis=0).T
# Convert to image index
col = (cor_id_uv[:, 0] / (2 * np.pi) + 0.5) * cor_img.shape[1] - 0.5
row = (cor_id_uv[:, 1] / np.pi + 0.5) * cor_img.shape[0] - 0.5
return np.stack([col, row], axis=0).T, cor_id_xy
def fit_avg_z(cor_id, cor_id_xy, cor_img):
score = map_coordinates(cor_img, [cor_id[:, 1], cor_id[:, 0]])
c = np.sqrt((cor_id_xy ** 2).sum(1))
cor_id_v = ((cor_id[:, 1] + 0.5) / cor_img.shape[0] - 0.5) * np.pi
z = c * np.tan(cor_id_v)
fit_z = (z * score).sum() / score.sum()
return fit_z
def map_coordinates_Pytorch(input, coordinates):
''' PyTorch version of scipy.ndimage.interpolation.map_coordinates
input: (H, W)
coordinates: (2, ...)
'''
h = input.shape[0]
w = input.shape[1]
def _coordinates_pad_wrap(h, w, coordinates):
coordinates[0] = coordinates[0] % h
coordinates[1] = coordinates[1] % w
return coordinates
co_floor = torch.floor(coordinates).long()
co_ceil = torch.ceil(coordinates).long()
d1 = (coordinates[1] - co_floor[1].float())
d2 = (coordinates[0] - co_floor[0].float())
co_floor = _coordinates_pad_wrap(h, w, co_floor)
co_ceil = _coordinates_pad_wrap(h, w, co_ceil)
f00 = input[co_floor[0], co_floor[1]]
f10 = input[co_floor[0], co_ceil[1]]
f01 = input[co_ceil[0], co_floor[1]]
f11 = input[co_ceil[0], co_ceil[1]]
fx1 = f00 + d1 * (f10 - f00)
fx2 = f01 + d1 * (f11 - f01)
return fx1 + d2 * (fx2 - fx1)
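# Sanity-check sketch (illustrative): this bilinear lookup matches
# scipy.ndimage.map_coordinates(..., order=1, mode='wrap') away from the borders.
#   inp = torch.arange(12.).view(3, 4)
#   coords = torch.tensor([[0.5, 1.0], [1.0, 2.5]])   # rows, then columns
#   map_coordinates_Pytorch(inp, coords)              # tensor([3.0, 6.5])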
def project2sphere_score(pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor, i_step=None):
# Sample corner loss
corid = pc2cor_id(pc, pc_vec, pc_theta, pc_height)
corid_coordinates = torch.stack([corid[:, 1], corid[:, 0]])
loss_cor = -map_coordinates_Pytorch(scorecor, corid_coordinates).mean()
# Sample boundary loss
if pc_theta.numel()==1:
p1 = pc + pc_vec
p2 = pc + rotatevec(pc_vec, pc_theta)
p3 = pc - pc_vec
p4 = pc + rotatevec(pc_vec, pc_theta - np.pi)
segs = [
pts_linspace(p1, p2),
pts_linspace(p2, p3),
pts_linspace(p3, p4),
pts_linspace(p4, p1),
]
else:
ps = pc + pc_vec
ps = ps.view(-1,2)
for c_num in range(pc_theta.shape[1]):
ps = torch.cat((ps, ps[c_num:,:]),0)
if (c_num % 2) == 0:
ps[-1,1] = pc_theta[0,c_num]
else:
ps[-1,0] = pc_theta[0,c_num]
ps = torch.cat((ps, ps[-1:,:]),0)
ps[-1,1] = ps[0,1]
segs = []
for c_num in range(ps.shape[0]-1):
segs.append(pts_linspace(ps[c_num,:], ps[c_num+1,:]))
segs.append(pts_linspace(ps[-1,:], ps[0,:]))
# ceil-wall
loss_ceilwall = 0
for seg in segs:
ceil_uv = xyz2uv(seg, z=-1)
ceil_idx = uv2idx(ceil_uv, 1024, 512)
ceil_coordinates = torch.stack([ceil_idx[:, 1], ceil_idx[:, 0]])
loss_ceilwall -= map_coordinates_Pytorch(scoreedg[..., 1], ceil_coordinates).mean() / len(segs)
# floor-wall
loss_floorwall = 0
for seg in segs:
floor_uv = xyz2uv(seg, z=pc_height)
floor_idx = uv2idx(floor_uv, 1024, 512)
floor_coordinates = torch.stack([floor_idx[:, 1], floor_idx[:, 0]])
loss_floorwall -= map_coordinates_Pytorch(scoreedg[..., 2], floor_coordinates).mean() / len(segs)
#losses = 1.0 * loss_cor + 0.1 * loss_wallwall + 0.5 * loss_ceilwall + 1.0 * loss_floorwall
losses = 1.0 * loss_cor + 1.0 * loss_ceilwall + 1.0 * loss_floorwall
# if i_step is not None:
# with torch.no_grad():
# print('step %d: %.3f (cor %.3f, wall %.3f, ceil %.3f, floor %.3f)' % (
# i_step, losses,
# loss_cor, loss_wallwall,
# loss_ceilwall, loss_floorwall))
return losses
def vecang(vec1, vec2):
vec1 = vec1 / np.sqrt((vec1 ** 2).sum())
vec2 = vec2 / np.sqrt((vec2 ** 2).sum())
return np.arccos(np.dot(vec1, vec2))
def rotatevec(vec, theta):
x = vec[0] * torch.cos(theta) - vec[1] * torch.sin(theta)
y = vec[0] * torch.sin(theta) + vec[1] * torch.cos(theta)
return torch.cat([x, y])
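# Quick check (illustrative): rotating the unit x vector by pi/2 gives ~[0, 1].
#   v = torch.tensor([1.0, 0.0])
#   rotatevec(v, torch.tensor([np.pi / 2]))   # ~ tensor([0., 1.])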
def pts_linspace(pa, pb, pts=300):
pa = pa.view(1, 2)
pb = pb.view(1, 2)
w = torch.arange(0, pts + 1, dtype=pa.dtype).view(-1, 1)
return (pa * (pts - w) + pb * w) / pts
def xyz2uv(xy, z=-1):
c = torch.sqrt((xy ** 2).sum(1))
u = torch.atan2(xy[:, 1], xy[:, 0]).view(-1, 1)
v = torch.atan2(torch.zeros_like(c) + z, c).view(-1, 1)
return torch.cat([u, v], dim=1)
def uv2idx(uv, w, h):
col = (uv[:, 0] / (2 * np.pi) + 0.5) * w - 0.5
row = (uv[:, 1] / np.pi + 0.5) * h - 0.5
return torch.cat([col.view(-1, 1), row.view(-1, 1)], dim=1)
def pc2cor_id(pc, pc_vec, pc_theta, pc_height):
if pc_theta.numel() == 1:
ps = torch.stack([
(pc + pc_vec),
(pc + rotatevec(pc_vec, pc_theta)),
(pc - pc_vec),
(pc + rotatevec(pc_vec, pc_theta - np.pi))
])
else:
ps = pc + pc_vec
ps = ps.view(-1, 2)
for c_num in range(pc_theta.shape[1]):
ps = torch.cat((ps, ps[c_num:, :]), 0)
if (c_num % 2) == 0:
ps[-1, 1] = pc_theta[0, c_num]
else:
ps[-1, 0] = pc_theta[0, c_num]
ps = torch.cat((ps, ps[-1:, :]), 0)
ps[-1, 1] = ps[0, 1]
return torch.cat([
uv2idx(xyz2uv(ps, z=-1), 1024, 512),
uv2idx(xyz2uv(ps, z=pc_height), 1024, 512),
], dim=0)
| 14,463 | 34.364303 | 127 | py |
DMH-Net | DMH-Net-main/postprocess/postprocess2.py | import argparse
import math
import warnings
from typing import List, Optional, Tuple, Dict
import numpy as np
import py360convert
import scipy.signal
import torch
from matplotlib import pyplot as plt
from torch import nn
from torch.utils.data import DataLoader
from tqdm import trange
from config import cfg_from_yaml_file, cfg, cfg_from_list
from e2plabel.e2plabelconvert import VIEW_NAME, VIEW_ARGS
from eval_cuboid import test
from eval_general import test_general
from layers import PerspectiveE2PP2E
from misc.utils import pipeload
from model import DMHNet, ENCODER_RESNET, ENCODER_DENSENET, ENCODER_HOUGH
from perspective_dataset import PerspectiveDataset
from postprocess.GDSolver import solve
from postprocess.LayoutNetv2 import LayoutNetv2PostProcessMain
def line3DConvertCore(cfg, line: List[torch.Tensor], fov, img_hw, view_idx, dis_d=None, dis_u=None, dis_f=None,
yline_mode="ud") -> torch.Tensor:
"""
Line representation format: (n, 8), where n is the total number of lines (necessarily equal to the combined size
of the tensors in lines[view_idx]). The 8 dims are: valid flag, type, marker, x, y, z, the line's original type
in its source view, and the source view index.
When the valid flag is 1:
type: 0 - line varying along x, 1 - line varying along y, 2 - line varying along z.
marker: 1 marks a line that is part of the prediction result, 0 marks an ordinary line TODO
original type: the line type in the view. 0: xleft 1: xright 2: yup 3: ydown 4: cupleft 5: cupright 6: cdownleft 7: cdownright
Coordinate convention: camera at the origin, forward (F) is +y, right (R) is +x, up (U) is +z.
"""
ratio = lineCoordToRatio(cfg, line, img_hw)
fov_hori, fov_vert = fov
# express all lines in the coordinate frame defined by the view contents
result = []
# handle x-direction lines
for r, v in zip(ratio[0], line[0]):
isXright = v >= img_hw[1] / 2
try:
if isXright:
# xright
x = dis_f * calLineAngleTan(r, fov_hori)
result.append([1, 2, 0, x, dis_f, math.nan, 1, view_idx])
else:
# xleft
x = dis_f * calLineAngleTan(r, fov_hori)
result.append([1, 2, 0, -x, dis_f, math.nan, 0, view_idx])
except TypeError as e:
if len(e.args) > 0 and e.args[0].find("NoneType") != -1:
# a None argument was passed in; no conversion needed, just record the raw values
result.append([0, 2, 0, r, v, math.nan, 1 if isXright else 0, view_idx])
else:
raise e
# handle y-direction lines
for r, v in zip(ratio[1], line[1]):
isYdown = v >= img_hw[0] / 2
try:
if isYdown:
if yline_mode[1] == "d":
y = dis_d / calLineAngleTan(r, fov_vert)
result.append([1, 0, 0, math.nan, y, -dis_d, 3, view_idx])
elif yline_mode[1] == "f":
z = dis_f * calLineAngleTan(r, fov_vert)
result.append([1, 0, 0, math.nan, dis_f, -z, 3, view_idx])
else:
assert False
else:
# yup
if yline_mode[0] == "u":
y = dis_u / calLineAngleTan(r, fov_vert)
result.append([1, 0, 0, math.nan, y, dis_u, 2, view_idx])
elif yline_mode[0] == "f":
z = dis_f * calLineAngleTan(r, fov_vert)
result.append([1, 0, 0, math.nan, dis_f, z, 2, view_idx])
else:
assert False
except TypeError as e:
if len(e.args) > 0 and e.args[0].find("NoneType") != -1:
# a None argument was passed in; no conversion needed, just record the raw values
result.append([0, 0, 0, r, v, math.nan, 3 if isYdown else 2, view_idx])
else:
raise e
# handle cup (upper center) lines
for r, v in zip(ratio[2], line[2]):
isRight = r >= 0
try:
x = dis_u * r
result.append([1, 1, 0, x, math.nan, dis_u, 5 if isRight else 4, view_idx])
except TypeError as e:
if len(e.args) > 0 and e.args[0].find("NoneType") != -1:
# a None argument was passed in; no conversion needed, just record the raw values
result.append([0, 1, 0, r, v, math.nan, 5 if isRight else 4, view_idx])
else:
raise e
# handle cdown (lower center) lines
for r, v in zip(ratio[3], line[3]):
isRight = r >= 0
try:
x = dis_d * r
result.append([1, 1, 0, x, math.nan, -dis_d, 5 if isRight else 4, view_idx])
except TypeError as e:
if len(e.args) > 0 and e.args[0].find("NoneType") != -1:
# a None argument was passed in; no conversion needed, just record the raw values
result.append([0, 1, 0, r, v, math.nan, 5 if isRight else 4, view_idx])
else:
raise e
result = torch.tensor(result, dtype=torch.float32, device=line[0].device)
if len(result) == 0:
result = result.new_zeros((0, 8))
return result
def line3DConvert(cfg, line: List[torch.Tensor], fov, img_hw, view_idx, dis_d=None, dis_u=None, dis_f=None,
yline_mode="ud") -> torch.Tensor:
r = line3DConvertCore(cfg, line, fov, img_hw, view_idx, dis_d=dis_d, dis_u=dis_u, dis_f=dis_f,
yline_mode=yline_mode)
if VIEW_NAME[view_idx] == "F":
pass
elif VIEW_NAME[view_idx] == "B":
r[:, 3:5] *= -1
elif VIEW_NAME[view_idx] == "L":
oriy = r[:, 4].clone()
r[:, 4] = r[:, 3] # y <- x
r[:, 3] = -oriy # x <- -y
yLineMask = r[:, 1] == 1 # swap the x-line and y-line classes
r[r[:, 1] == 0, 1] = 1
r[yLineMask, 1] = 0
elif VIEW_NAME[view_idx] == "R":
oriy = r[:, 4].clone()
r[:, 4] = -r[:, 3] # y <- -x
r[:, 3] = oriy # x <- y
yLineMask = r[:, 1] == 1 # swap the x-line and y-line classes
r[r[:, 1] == 0, 1] = 1
r[yLineMask, 1] = 0
elif VIEW_NAME[view_idx] == "D":
oriy = r[:, 4].clone()
r[:, 4] = r[:, 5] # y <- z
r[:, 5] = -oriy # z <- -y
yLineMask = r[:, 1] == 1 # swap the z-line and y-line classes
r[r[:, 1] == 2, 1] = 1
r[yLineMask, 1] = 2
elif VIEW_NAME[view_idx] == "U":
oriy = r[:, 4].clone()
r[:, 4] = -r[:, 5] # y <- -z
r[:, 5] = oriy # z <- y
yLineMask = r[:, 1] == 1 # swap the z-line and y-line classes
r[r[:, 1] == 2, 1] = 1
r[yLineMask, 1] = 2
else:
assert False
return r
def allLinesConvert(cfg, lines: List[List[torch.Tensor]], img_hw, camera_height, dis_u, dis_box, view_args,
extra: Optional[List[List[torch.Tensor]]] = None):
line_result = []
# the four side views
for view_idx in range(4):
r = line3DConvert(cfg, lines[view_idx], view_args[view_idx][0], img_hw, view_idx, dis_d=camera_height,
dis_u=dis_u, dis_f=dis_box[view_idx] if dis_box is not None else None, yline_mode="ud")
line_result.append(r)
# top view
view_idx = 4
r = line3DConvert(cfg, lines[view_idx], view_args[view_idx][0], img_hw, view_idx, dis_f=dis_u,
dis_u=dis_box[2] if dis_box is not None else None,
dis_d=dis_box[0] if dis_box is not None else None, yline_mode="ff")
line_result.append(r)
# bottom view
view_idx = 5
r = line3DConvert(cfg, lines[view_idx], view_args[view_idx][0], img_hw, view_idx, dis_f=camera_height,
dis_u=dis_box[0] if dis_box is not None else None,
dis_d=dis_box[2] if dis_box is not None else None, yline_mode="ff")
line_result.append(r)
line_result = torch.cat(line_result, 0)
if extra is not None: # append the extra information
line_result = torch.cat([line_result, torch.cat([torch.cat(a) for a in extra]).unsqueeze(1)], 1)
return line_result
def classifyLine(line) -> Tuple[torch.Tensor, List[torch.Tensor]]:
"""
Classify the lines into 12 classes: the three symbols describe the relation of x, y, z to 0, where '-' marks
the axis the line runs along.
0:->> 1:->< 2:-<> 3:-<< 4:>-> 5:>-< 6:<-> 7:<-< 8:>>- 9:><- 10:<>- 11:<<-
The class information is appended as the last dim of the line representation.
:param line (n, k)
:return Tuple[(n, k+1) - the lines with class information appended; the first k dims are the original line
representation and the last dim is the class;
the list of lines bucketed into the 12 classes]
"""
classify_arr = []
for l in line:
if l[1] == 0:
c = (l[4] < 0) * 2 + (l[5] < 0) + 0
elif l[1] == 1:
c = (l[3] < 0) * 2 + (l[5] < 0) + 4
elif l[1] == 2:
c = (l[3] < 0) * 2 + (l[4] < 0) + 8
classify_arr.append(c)
line = torch.cat([line, line.new_tensor(classify_arr).unsqueeze(1)], 1)
classified_result = []
for i in range(12):
classified_result.append(line[line[:, -1] == i])
return line, classified_result
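# Encoding sketch (illustrative line): an x-varying line (type 0) with y > 0 and
# z < 0 gets class (y<0)*2 + (z<0) = 1, appended as the last column.
#   l = torch.tensor([[1., 0., 0., float('nan'), 2.0, -1.6, 3., 0.]])
#   tagged, buckets = classifyLine(l)
#   # tagged[0, -1] == 1 and the line lands in buckets[1]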
def lwhToPeaks(cfg, lwh: torch.Tensor, img_hw) -> List[List[Tuple[torch.Tensor, torch.Tensor]]]:
"""
:return 6 views x 4 line types; each entry is a pair whose first number is the Hough-domain value and whose second is the visible-length fraction of the line
"""
lwh = torch.abs(lwh)
result = [[[] for _ in range(4)] for _ in range(6)]
TABLE = [
[3, [0, 1, 5, 4]],
[1, [3, 2, 5, 4]],
[2, [1, 0, 5, 4]],
[0, [2, 3, 5, 4]],
[5, [0, 1, 2, 3]],
[4, [0, 1, 3, 2]]
]
for view_idx in range(6):
ratio = 1 - (lwh[TABLE[view_idx][1]] / lwh[TABLE[view_idx][0]])
with torch.set_grad_enabled(True):
length = ratio.new_ones(4)
pointPlace = [(img_hw[1] - 1) / 2 * ratio[0], (img_hw[1] - 1) / 2 * (2 - ratio[1]),
(img_hw[0] - 1) / 2 * ratio[2], (img_hw[0] - 1) / 2 * (2 - ratio[3])]
# find intersections, refine the horizontal/vertical line lengths, and compute the center lines
for i in range(2):
for j in range(2, 4):
if not (ratio[i] > 0 and ratio[j] > 0):
continue
with torch.set_grad_enabled(True):
length[i] = length[i] - (ratio[j] / 2)
length[j] = length[j] - (ratio[i] / 2)
lenCLine = min(ratio[i], ratio[j]).clone()
houghParamCLine, isCDown = PerspectiveDataset.coord2AngleValue(pointPlace[i], pointPlace[j], img_hw)
result[view_idx][3 if isCDown == 1 else 2].append((houghParamCLine, lenCLine))
# add the horizontal and vertical peaks
for i in range(4):
if ratio[i] > 0:
result[view_idx][0 if i < 2 else 1].append((pointPlace[i], length[i]))
for t in range(4):
l = result[view_idx][t]
l2 = (torch.stack([tup[0] for tup in l]) if len(l) > 0 else lwh.new_zeros(0),
torch.stack([tup[1] for tup in l]) if len(l) > 0 else lwh.new_zeros(0))
result[view_idx][t] = l2
return result
def solveActualHeightByIOU(d: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
"""
:param d shape (4,), the four values representing (y+, x+, y-, x-). u has the same format.
:return shape (1), the ratio of the actual camera-to-ceiling height to the currently assumed height
"""
changeToNegative = d.new_tensor([-1, 1, -1, 1])
d, u = (d[[3, 1, 2, 0]] * changeToNegative).unsqueeze(0), (u[[3, 1, 2, 0]] * changeToNegative).unsqueeze(0)
class UpperIOUModule(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.ones(d.shape[0], requires_grad=True))
def forward(self):
return (1 - calculateIOU(d, u * self.w).diagonal()).mean()
module = UpperIOUModule().to(d.device)
module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
return module.w.data
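# Intuition sketch (illustrative numbers; convergence depends on the GDSolver
# settings): if the ceiling box u is twice the floor box d because the assumed
# height was twice too large, maximizing IoU(d, w*u) drives w toward 0.5,
# halving the assumed camera-to-ceiling height.
#   d = torch.tensor([1.0, 1.0, 1.0, 1.0])   # (y+, x+, y-, x-)
#   u = torch.tensor([2.0, 2.0, 2.0, 2.0])
#   solveActualHeightByIOU(d, u)              # approaches tensor([0.5])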
def interpolate(vector: torch.Tensor, position: torch.Tensor):
floored = torch.floor(position).to(torch.int64)
flooredAdd1 = floored + 1
floored = torch.clamp(floored, 0, len(vector) - 1)
flooredAdd1 = torch.clamp(flooredAdd1, 0, len(vector) - 1)
remain = position - floored
return remain * vector[flooredAdd1] + (1 - remain) * vector[floored]
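# Illustrative behaviour of the clamped linear interpolation above:
#   v = torch.tensor([0.0, 10.0, 20.0])
#   interpolate(v, torch.tensor([0.5, 1.25, 5.0]))   # tensor([ 5.0, 12.5, 20.0])
# Out-of-range positions clamp to the last element because both neighbouring
# indices collapse to len(vector) - 1.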
def lossFunction(output, img_idx, peakss):
preds_xy = torch.sigmoid(output["p_preds_xy"][img_idx]).to(peakss[0][0][0].device)
preds_cud = torch.sigmoid(output["p_preds_cud"][img_idx]).to(peakss[0][0][0].device)
views_result = []
for view_idx, ((xPeaks, xLength), (yPeaks, yLength), (cupPeaks, cupLength),
(cDownPeaks, cDownLength)) in enumerate(peakss):
fourPreds = [preds_xy[view_idx, :, 0], preds_xy[view_idx, :, 1],
preds_cud[view_idx, :, 0], preds_cud[view_idx, :, 1]]
fourPeaks = [xPeaks, yPeaks, cupPeaks, cDownPeaks]
fourLength = [xLength, yLength, cupLength, cDownLength]
view_result = []
for preds, peaks, length in zip(fourPreds, fourPeaks, fourLength):
scores = interpolate(preds, peaks)
scores = scores * length
view_result.append(scores)
view_result = torch.cat(view_result)
views_result.append(view_result)
views_result = torch.cat(views_result)
final_result = views_result.sum()
final_result.requires_grad_(True)
return 20 - final_result
def solveLwh(cfg, output, img_idx, start_lwh, img_hw):
class LWHSolveModule(nn.Module):
def __init__(self):
super().__init__()
startParam = start_lwh[[0, 1, 2, 3, 5]]
self.param = nn.Parameter(startParam.clone().detach().requires_grad_(True), requires_grad=True)
def lwh(self):
return torch.cat([self.param[0:4], self.param.new_tensor([-1.6]), self.param[4:5]])
def forward(self):
lwh = self.lwh()
peaks = lwhToPeaks(cfg, lwh, img_hw)
return lossFunction(output, img_idx, peaks)
module = LWHSolveModule().to("cpu")
module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
return module.lwh().data, torch.abs(module.forward())
def linesGTProcess_PretendDisUThenOptimIOU(cfg, lines: List[List[torch.Tensor]], img_hw, camera_height=1.6,
view_args=VIEW_ARGS,
preset_dis_u=1.6):
"""
Assume a camera-to-ceiling height, fully estimate the size of the upper box, then solve for the true height by optimizing an IoU-maximization problem.
"""
with torch.no_grad():
line_result = allLinesConvert(cfg, lines, img_hw, camera_height, preset_dis_u, None, view_args)
line_result, classified = classifyLine(line_result)
# note: at this point the coordinates of the z (vertical) lines have not yet been computed
# assertion: for the GT, each class should contain exactly one line that was an xy-line in the original perspective view
# apart from the uncomputed z lines, the remaining eight lines form the lower and upper boxes
# maximizing the IoU between the two solves for the height, which is then used below
dis_u = preset_dis_u
downbox, upbox, _ = calculateUDBox(cfg, classified, useCLine=False, useZLine=False,
require_onlyone=cfg.DATA.TYPE == "cuboid")
w = solveActualHeightByIOU(downbox, upbox).item()
dis_u = dis_u * w
dis_box = (downbox + upbox * w) / 2
# recompute line_result once more so that the z lines can also produce results
line_result = allLinesConvert(cfg, lines, img_hw, camera_height, dis_u, dis_box, view_args)
return line_result, dis_box.new_tensor(
[-dis_box[3], dis_box[1], -dis_box[2], dis_box[0], -camera_height, dis_u])
def updateProb(line: torch.Tensor, amount: float, mask: Optional[torch.BoolTensor] = None) -> torch.Tensor:
toAdd = torch.zeros_like(line)
if mask is not None:
toAdd[mask, 8] = amount
else:
toAdd[:, 8] = amount
return line + toAdd
def calculateUDBox(cfg, classified: List[torch.Tensor], useCLine=True, useZLine=True, c_line_prob_punish=None,
z_line_prob_punish=None, require_onlyone=False) -> Tuple[
torch.Tensor, torch.Tensor, Optional[dict]]:
"""
Recompute the upper and lower boxes from the already computed, classified 3D line results.
Strategy: each box edge corresponds to three groups of lines: two groups of vertical lines and the group of
ceiling or floor lines it belongs to. Subtract a constant from the probabilities of the vertical lines, then
merge the three groups and take the most probable line.
:return Tuple[downbox, upbox, extra]; the box format is (y+, x+, y-, x-); extra is a dict or None
"""
c_line_prob_punish = c_line_prob_punish if c_line_prob_punish is not None else cfg.POST_PROCESS.CPP
z_line_prob_punish = z_line_prob_punish if z_line_prob_punish is not None else cfg.POST_PROCESS.ZPP
z_class_seq = [[8, 10], [8, 9], [9, 11], [10, 11]] if useZLine else [[]] * 4
result = []
extra = None
for mainClass, zClasses, name in zip([1, 5, 3, 7, 0, 4, 2, 6], z_class_seq * 2,
["_↑", "_→", "_↓", "_←", "¯↑", "¯→", "¯↓", "¯←"]):
if not useZLine:
t = classified[mainClass]
notCLine_mask = torch.logical_and(0 <= t[:, 6], t[:, 6] <= 3)
valid_mask = t[:, 0] == 1 if useCLine else torch.logical_and(t[:, 0] == 1, notCLine_mask)
t = t[valid_mask]
if useCLine and c_line_prob_punish is not None: # cline penalty
t = updateProb(t, -c_line_prob_punish, ~notCLine_mask) # subtract the penalized probability
r = t
else:
r = []
t = classified[mainClass]
notCLine_mask = torch.logical_and(0 <= t[:, 6], t[:, 6] <= 3)
valid_mask = t[:, 0] == 1 if useCLine else torch.logical_and(t[:, 0] == 1, notCLine_mask)
t = t[valid_mask]
if useCLine and c_line_prob_punish is not None: # cline penalty
t = updateProb(t, -c_line_prob_punish, ~notCLine_mask) # subtract the penalized probability
r.append(t)
for zClass in zClasses:
t = classified[zClass]
notCLine_mask = torch.logical_and(0 <= t[:, 6], t[:, 6] <= 3)
valid_mask = t[:, 0] == 1 if useCLine else torch.logical_and(t[:, 0] == 1, notCLine_mask)
t = t[valid_mask]
if useCLine and c_line_prob_punish is not None: # cline penalty
t = updateProb(t, -c_line_prob_punish, ~notCLine_mask) # subtract the penalized probability
if z_line_prob_punish is not None: # zline penalty (applies to all zClass classes)
t = updateProb(t, -z_line_prob_punish) # subtract the penalized probability
r.append(t)
r = torch.cat(r, 0)
pickWhichAxis = (1 if mainClass <= 3 else 0) + 3
if require_onlyone:
if len(r) != 1:
warnings.warn(
"calculateUDBox assertion require_onlyone fail! for mainClass {:d}, has {:} lines".format(mainClass,
len(r)))
else:
r[:, 2] = 1
result.append(r[:, pickWhichAxis].mean().abs())
else:
if len(r) > 0:
idx = r[:, 8].argmax()
r[idx, 2] = 1
result.append(r[idx, pickWhichAxis].abs())
else:
warnings.warn("calculateUDBox: no line for mainClass {:s}({:d})!".format(name, mainClass))
result.append(cfg.POST_PROCESS.DEFAULT_DISTANCE)
if extra is None: extra = {}
if "noline" not in extra: extra["noline"] = []
extra["noline"].append(name)
if extra is not None and "noline" in extra: extra["noline"] = " ".join(extra["noline"])
return classified[0].new_tensor(result[0:4]), classified[0].new_tensor(result[4:8]), extra
def linesPredProcess_PretendDisUThenOptimIOU(cfg, lines: List[List[torch.Tensor]], probs: List[List[torch.Tensor]],
img_hw, camera_height=1.6,
view_args=VIEW_ARGS,
preset_dis_u=1.6):
"""
Assume a camera-to-ceiling height, fully estimate the size of the upper box, then solve for the true height by optimizing an IoU-maximization problem.
"""
with torch.no_grad():
line_result = allLinesConvert(cfg, lines, img_hw, camera_height, preset_dis_u, None, view_args, extra=probs)
line_result, classified = classifyLine(line_result)
# note: at this point the coordinates of the z (vertical) lines have not yet been computed
# for the prediction, apart from the uncomputed z lines, take the most probable line in each of the remaining 8 classes; these eight lines form the lower and upper boxes
# maximizing the IoU between the two solves for the height, and with it the full length/width/height
dis_u = preset_dis_u
downbox, upbox, extra = calculateUDBox(cfg, classified, useZLine=False)
w = solveActualHeightByIOU(downbox, upbox).item()
dis_u = dis_u * w
dis_box = (downbox + upbox * w) / 2
extra_nz = extra["noline"] if extra is not None and "noline" in extra else None
for i in range(cfg.POST_PROCESS.ITER if extra_nz is None else cfg.POST_PROCESS.ITER_NZ):
# input: the distance to the ceiling, the lower box, the upper box
# procedure: 1. solve for the distance factor from the lower and upper boxes and update the distance to the ceiling;
# 2. with the new ceiling distance and the average of the lower box and the upper box (the latter rescaled by the freshly computed w), re-solve all lines in space;
# 3. update the lower and upper boxes from the complete line results (which now include the vertical lines as well as the box lines)
# iterating this rule should, in principle, optimize the box lines
olddownbox, oldupbox = downbox, upbox
line_result = allLinesConvert(cfg, lines, img_hw, camera_height, dis_u, dis_box, view_args, extra=probs)
line_result, classified = classifyLine(line_result)
downbox, upbox, extra = calculateUDBox(cfg, classified)
w = solveActualHeightByIOU(downbox, upbox).item()
dis_u = dis_u * w
dis_box = (downbox + upbox * w) / 2
if torch.mean(torch.abs(downbox - olddownbox)) < 1e-3 and torch.mean(torch.abs(upbox - oldupbox)) < 1e-3:
break
line_result = allLinesConvert(cfg, lines, img_hw, camera_height, dis_u, dis_box, view_args, extra=probs)
if extra_nz is not None:
if extra is None: extra = {}
extra["nz"] = extra_nz
return line_result, dis_box.new_tensor(
[-dis_box[3], dis_box[1], -dis_box[2], dis_box[0], -camera_height, dis_u]), extra
def findPeaks(cfg, vector: torch.Tensor) -> torch.Tensor:
# TODO: parameter tuning, etc.
# locs = scipy.signal.find_peaks_cwt(vector.cpu().numpy(), np.arange(10, 60))
# locs, _ = scipy.signal.find_peaks(vector.cpu().numpy(), height=0.5, distance=60, prominence=0.4)
locs, _ = scipy.signal.find_peaks(vector.cpu().numpy(), distance=cfg.POST_PROCESS.get("PEAK_DISTANCE", 60),
height=cfg.POST_PROCESS.PEAK_HEIGHT,
prominence=cfg.POST_PROCESS.PEAK_PROMINENCE)
return vector.new_tensor(locs)
def predProbMap_PretendDisUThenOptimIOU(cfg, output, img_idx, img_hw, camera_height=1.6, view_args=VIEW_ARGS,
preset_dis_u=1.6):
with torch.no_grad():
lines, probs = extractPredPeaks(cfg, output, img_idx)
return linesPredProcess_PretendDisUThenOptimIOU(cfg, lines, probs, img_hw, camera_height, view_args,
preset_dis_u)
def extractPredPeaks(cfg, output, img_idx):
lines, probs = [], []
for view_idx in range(6):
view_line, view_prob = [], []
for signal in [output["p_preds_xy"][img_idx, view_idx, :, 0],
output["p_preds_xy"][img_idx, view_idx, :, 1],
output["p_preds_cud"][img_idx, view_idx, :, 0],
output["p_preds_cud"][img_idx, view_idx, :, 1]]:
signal = torch.sigmoid(signal)
# search for peaks separately in the first and second halves
mid = len(signal) // 2
peak = torch.cat([findPeaks(cfg, signal[0:mid]), mid + findPeaks(cfg, signal[mid:])])
prob = signal[peak.to(torch.int64)]
view_line.append(peak)
view_prob.append(prob)
lines.append(view_line)
probs.append(view_prob)
return lines, probs
def findPeaks2D8Points(cfg, matrix: torch.Tensor) -> torch.Tensor:
# for each column, take the n largest values from the upper and lower halves and average them
h2f = matrix.shape[0] // 2
column_vec = torch.cat([matrix[0:h2f].topk(cfg.POST_PROCESS.EMASK.ROW_CHOOSE_N, dim=0)[0],
matrix[h2f:].topk(cfg.POST_PROCESS.EMASK.ROW_CHOOSE_N, dim=0)[0]], 0).mean(0)
result = []
columns = []
# extract peaks from this vector segment by segment
for i in range(4):
begin, end = matrix.shape[1] * i // 4, matrix.shape[1] * (i + 1) // 4
seq = column_vec[begin:end]
peaks1D = findPeaks(cfg, seq).to(torch.int64)
if len(peaks1D) > 0:
best_peak = peaks1D[seq[peaks1D].argmax()].item() + begin
else:
warnings.warn("when find 2d peaks for emask, len(peaks1D) == 0 when calculating column!")
best_peak = (begin + end) // 2
columns.append(best_peak)
for column in columns:
for i in reversed(range(2)):
begin, end = matrix.shape[0] * i // 2, matrix.shape[0] * (i + 1) // 2
seq = matrix[begin:end, column]
peaks1D = findPeaks(cfg, seq).to(torch.int64)
if len(peaks1D) > 0:
best_peak = peaks1D[seq[peaks1D].argmax()].item() + begin
else:
warnings.warn("when find 2d peaks for emask, len(peaks1D) == 0 when calculating column!")
best_peak = (begin + end) // 2
result.extend([(column, best_peak)])
return matrix.new_tensor(result)
def calPredCorIdByEMask(cfg, emask_img, z0=50):
type_weight = emask_img.new_tensor(cfg.POST_PROCESS.EMASK.TYPE_WEIGHT)
type_weight /= type_weight.sum()
emask_score = (type_weight.unsqueeze(-1).unsqueeze(-1) * emask_img).sum(dim=0)
coords = findPeaks2D8Points(cfg, emask_score).cpu().numpy()
indices = np.repeat(np.argsort(coords[1::2, 0]) * 2, 2)
indices[0::2] += 1
coords = coords[indices]
xyz = py360convert.uv2unitxyz(py360convert.coor2uv(coords, *emask_img.shape[1:3]))
z1 = (xyz[1::2, 1] / xyz[0::2, 1]).mean() * z0
return coords, z0, z1.item()
def calMetrics_PretendDisUThenOptimIOU(cfg, input, output, img_idx, optimization, camera_height=1.6,
view_args=VIEW_ARGS, preset_dis_u=1.6) -> Tuple[
Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Dict[
str, torch.Tensor]]:
with torch.no_grad():
img_hw = input["p_imgs"].shape[-2:]
e_img_hw = input["e_img"].shape[-2:]
gt_lines, gt_lwh = linesGTProcess_PretendDisUThenOptimIOU(cfg, input["peaks"][img_idx], img_hw, camera_height,
view_args, preset_dis_u)
gt_cor_id = input["cor"][img_idx]
pred_lines, pred_lwh, pred_extra = predProbMap_PretendDisUThenOptimIOU(cfg, output, img_idx, img_hw,
camera_height,
view_args, preset_dis_u)
if optimization:
pred_lwh, _ = solveLwh(cfg, output, img_idx, pred_lwh, img_hw)
pred_peaks, pred_probs = extractPredPeaks(cfg, output, img_idx)
pred_lines = allLinesConvert(cfg, pred_peaks, img_hw, -pred_lwh[4], pred_lwh[5], pred_lwh[[3, 1, 2, 0]],
view_args, pred_probs)
corner_method = cfg.POST_PROCESS.get("CORNER_METHOD", "lwh")
if corner_method == "lwh":
pred_cor_id_np, z0, z1 = cvtPredLwhToEquirecCornerCoords(pred_lwh, e_img_hw)
elif corner_method == "emask":
pred_cor_id_np, z0, z1 = calPredCorIdByEMask(cfg, output["p_preds_emask"][img_idx])
pred_cor_id = pred_lwh.new_tensor(pred_cor_id_np)
# move everything onto the GT's device for the computation
pred_lines, pred_lwh = pred_lines.to(gt_lines.device), pred_lwh.to(gt_lwh.device)
metrics = {}
# compute the metrics with HorizonNet's evaluation code
t = {}
if cfg.POST_PROCESS.get("TEST_WITH_BOTH", False):
test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
t["3DIoU-general"] = t["3DIoU"]
test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
elif cfg.DATA.TYPE == "cuboid":
test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
elif cfg.DATA.TYPE == "general":
test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
else:
assert False
for k in t:
metrics[k] = pred_lwh.new_tensor(t[k]) if not isinstance(t[k], str) else t[k]
if pred_extra is not None:
metrics.update(pred_extra)
return (gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metrics
def calMetrics_PretendDisUThenOptimIOUForV2(cfg, input, output, img_idx, optimization, camera_height=1.6,
view_args=VIEW_ARGS, preset_dis_u=1.6) -> Tuple[
Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Dict[
str, torch.Tensor], torch.Tensor]:
with torch.no_grad():
img_hw = input["p_imgs"].shape[-2:]
e_img_hw = input["e_img"].shape[-2:]
gt_lines, gt_lwh = linesGTProcess_PretendDisUThenOptimIOU(cfg, input["peaks"][img_idx], img_hw, camera_height,
view_args, preset_dis_u)
gt_cor_id = input["cor"][img_idx]
pred_lines, pred_lwh, pred_extra = predProbMap_PretendDisUThenOptimIOU(cfg, output, img_idx, img_hw,
camera_height,
view_args, preset_dis_u)
if optimization:
pred_lwh, err_score = solveLwh(cfg, output, img_idx, pred_lwh, img_hw)
pred_peaks, pred_probs = extractPredPeaks(cfg, output, img_idx)
pred_lines = allLinesConvert(cfg, pred_peaks, img_hw, -pred_lwh[4], pred_lwh[5], pred_lwh[[3, 1, 2, 0]],
view_args, pred_probs)
corner_method = cfg.POST_PROCESS.get("CORNER_METHOD", "lwh")
if corner_method == "lwh":
pred_cor_id_np, z0, z1 = cvtPredLwhToEquirecCornerCoords(pred_lwh, e_img_hw)
elif corner_method == "emask":
pred_cor_id_np, z0, z1 = calPredCorIdByEMask(cfg, output["p_preds_emask"][img_idx])
pred_cor_id = pred_lwh.new_tensor(pred_cor_id_np)
# move everything onto the GT's device for the computation
pred_lines, pred_lwh = pred_lines.to(gt_lines.device), pred_lwh.to(gt_lwh.device)
# iou3d = calculateIOU(pred_lwh.unsqueeze(0), gt_lwh.unsqueeze(0))[0, 0]
# iou2d = calculateIOU(pred_lwh[:4].unsqueeze(0), gt_lwh[:4].unsqueeze(0))[0, 0]
# metrics = {
# "box_iou3d": iou3d,
# "box_iou2d": iou2d
# }
metrics = {}
# compute the metrics with HorizonNet's evaluation code
t = {}
if cfg.POST_PROCESS.get("TEST_WITH_BOTH", False):
test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
t["3DIoU-general"] = t["3DIoU"]
test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
elif cfg.DATA.TYPE == "cuboid":
test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
elif cfg.DATA.TYPE == "general":
test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
else:
assert False
for k in t:
metrics[k] = pred_lwh.new_tensor(t[k]) if not isinstance(t[k], str) else t[k]
if pred_extra is not None:
metrics.update(pred_extra)
return (gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metrics, err_score
def LayoutNetv2PostProcessWrapper(cfg, input, output, img_idx) -> Tuple[
Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Dict[
str, torch.Tensor]]:
"""
    :return: Tensor<n, 2> corner coordinates, in the format that can be fed directly to test_general
"""
e_img_hw = input["e_img"].shape[-2:]
gt_cor_id = input["cor"][img_idx]
generatePred2DMask(cfg, input, output, img_idx)
pred_cor_id_np = LayoutNetv2PostProcessMain(output["p_preds_emask"][img_idx].mean(0).cpu().numpy(),
output["p_preds_emask"][img_idx].permute(1, 2, 0).cpu().numpy())
pred_cor_id = input["cor"][img_idx].new_tensor(pred_cor_id_np)
metrics = {}
    # compute metrics with HorizonNet's evaluation code
t = {}
if cfg.DATA.TYPE == "cuboid":
        assert False  # cuboid mode is not supported by this wrapper; the call below is unreachable (z0/z1 are undefined here)
        test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
elif cfg.DATA.TYPE == "general":
test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
else:
assert False
for k in t:
metrics[k] = output["p_preds_emask"][img_idx].new_tensor(t[k]) if not isinstance(t[k], str) else t[k]
return (None, None, gt_cor_id), (None, None, pred_cor_id), metrics
def postProcess(cfg, input, output, img_idx, is_valid_mode=False, camera_height=1.6, view_args=VIEW_ARGS,
preset_dis_u=1.6) -> Tuple[
Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Dict[
str, torch.Tensor]]:
"""
:return (gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metrics
"""
method = cfg.POST_PROCESS.METHOD if not (is_valid_mode and ("METHOD_WHEN_VALID" in cfg.POST_PROCESS)) \
else cfg.POST_PROCESS.METHOD_WHEN_VALID
if method == "None" or method is None:
return (None, None, input["cor"][img_idx]), (None, None, None), {}
elif method == "geometry" or method == "optimization":
return calMetrics_PretendDisUThenOptimIOU(cfg, input, output, img_idx, method == "optimization", camera_height,
view_args, preset_dis_u)
elif method == "LayoutNetv2":
return LayoutNetv2PostProcessWrapper(cfg, input, output, img_idx)
elif method == "develop" or method == "noncuboid":
from postprocess.noncuboid import nonCuboidPostProcess
return nonCuboidPostProcess(cfg, input, output, img_idx)
else:
assert False, "不支持的POST_PROCESS.METHOD"
def calculateIOU(boxes1: torch.Tensor, boxes2: torch.Tensor):
"""
    Box format: (x-, x+, y-, y+, z-, z+); z-/z+ may be omitted.
    On each axis the smaller element must come before the larger one, otherwise the result is wrong!
"""
assert torch.all(boxes1[:, ::2] <= boxes1[:, 1::2]) and torch.all(boxes2[:, ::2] <= boxes2[:, 1::2])
boxes1 = boxes1.unsqueeze(1)
boxes2 = boxes2.unsqueeze(0)
boxes1_volume = torch.clamp(boxes1[:, :, 1::2] - boxes1[:, :, 0::2], min=0).prod(dim=2)
boxes2_volume = torch.clamp(boxes2[:, :, 1::2] - boxes2[:, :, 0::2], min=0).prod(dim=2)
    # note: after the unsqueezes above, len() reads the wrong dims, so tile both to (n, m, d) explicitly
    n, m = boxes1.shape[0], boxes2.shape[1]
    boxes1 = boxes1.repeat(1, m, 1)
    boxes2 = boxes2.repeat(n, 1, 1)
mixed = torch.stack([boxes1, boxes2], dim=3)
distance = mixed[:, :, 1::2].min(dim=3)[0] - mixed[:, :, 0::2].max(dim=3)[0]
intersect = torch.clamp(distance, min=0).prod(dim=2)
iou = intersect / (boxes1_volume + boxes2_volume - intersect)
return iou
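# Worked example (illustrative values, not from the original code, using this module's torch import):
# two axis-aligned 2x2 squares overlapping in a 1x1 region give IoU = 1 / (4 + 4 - 1) = 1/7 ~= 0.143.
# calculateIOU(torch.tensor([[0., 2., 0., 2.]]), torch.tensor([[1., 3., 1., 3.]]))
# -> tensor([[0.1429]])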
def calLineAngleTan(x, fov: int):
tan = np.tan(np.deg2rad(fov / 2)) if fov != 90 else 1.0
return (1 - x) * tan
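# Quick sanity check (illustration only): with fov=90 the half-angle tangent is 1, so
# calLineAngleTan(0.0, 90) == 1.0 and calLineAngleTan(1.0, 90) == 0.0.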
def lineCoordToRatio(cfg, line: List[torch.Tensor], img_hw):
result = [t.clone() for t in line]
    # 10.12: for the same reason as in linesPostProcess, the x/y line handling subtracts 1.
    # The center lines were already computed correctly and need no further handling.
    # constant-y lines
yline = result[1]
yDownMask = yline >= (img_hw[0] - 1) / 2
yline[yDownMask] = (img_hw[0] - 1) - yline[yDownMask]
result[1] = yline / ((img_hw[0] - 1) / 2)
    # constant-x lines
xline = result[0]
xRightMask = xline >= (img_hw[1] - 1) / 2
xline[xRightMask] = (img_hw[1] - 1) - xline[xRightMask]
result[0] = xline / ((img_hw[1] - 1) / 2)
    # angle (center) lines:
if cfg.MODEL.HOUGH.CLINE_TYPE == "NEW":
        # Returns the tangent of the angle to the plumb (vertical) line. Multiplying this value by the known camera height / ceiling distance directly yields the x coordinate.
h2, w2 = (img_hw[0] - 1) / 2, (img_hw[1] - 1) / 2
h2f = img_hw[0] // 2
        # c-up lines
cupline = result[2]
mask1 = cupline < h2f - 1
mask3 = cupline > h2f + img_hw[1] - 2
mask2 = torch.logical_not(torch.logical_or(mask1, mask3))
cupline[mask1] = w2 / (((h2f - 1) - cupline[mask1]) - h2)
cupline[mask2] = ((cupline[mask2] - (h2f - 1)) - w2) / h2
cupline[mask3] = -w2 / ((cupline[mask3] - (h2f + img_hw[1] - 2)) - h2)
result[2] = cupline
        # c-down lines
cdownline = result[3]
mask1 = cdownline < h2f - 1
mask3 = cdownline > h2f + img_hw[1] - 2
mask2 = torch.logical_not(torch.logical_or(mask1, mask3))
cdownline[mask1] = w2 / (h2 - ((h2f - 1) - cdownline[mask1]))
cdownline[mask2] = (w2 - (cdownline[mask2] - (h2f - 1))) / h2
cdownline[mask3] = -w2 / (h2 - (cdownline[mask3] - (h2f + img_hw[1] - 2)))
result[3] = cdownline
else:
raise NotImplementedError()
return result
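# Example of the normalization above (hypothetical numbers): for img_hw[0] = 512,
# a constant-y line at y = 511 (bottom edge) mirrors to 0 and maps to ratio 0.0, while one at the
# image center y = 255.5 maps to ratio 1.0; constant-x lines are normalized symmetrically.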
def gtVisualize(cfg, lines: List[List[torch.Tensor]], img_hw, camera_height=1.6):
"""
    In Open3D's built-in axes, red is the x axis, green is the y axis, and blue is the z axis!
    Among the displayed lines, red marks constant-x lines, green constant-y lines, and blue lines through the center!
"""
lines_results, layout_param = linesGTProcess_PretendDisUThenOptimIOU(cfg, lines, img_hw)
print(layout_param)
from visualization import o3dRunVis, o3dDrawLines, o3dInitVis
vis = o3dInitVis()
o3dDrawLines(vis, lines_results, layout_param)
o3dRunVis(vis)
def cvtPredLwhToEquirecCornerCoords(lwh, e_img_hw, z0=50):
"""
    Convert the estimated room layout in lwh format into the format accepted by the test function in HorizonNet's eval_cuboid.py.
:param lwh <6> -x, x, -y, y, -z, z
:return dt_cor_id, z0, z1
"""
z1 = lwh[4] / lwh[5] * z0
    # build the 8 uv coordinates
def lwhToUv(xyz):
u = torch.atan2(xyz[0], xyz[1])
v = torch.atan(xyz[2] / torch.norm(xyz[0:2]))
return torch.stack((u, v))
uvs = []
for i in range(8):
uv = lwhToUv(lwh[[(i // 4) % 2 + 0, (i // 2) % 2 + 2, (i // 1) % 2 + 4]].cpu())
uvs.append(uv)
uvs = torch.stack(uvs)
coords = py360convert.uv2coor(uvs.cpu().numpy(), *e_img_hw)
indices = np.repeat(np.argsort(coords[1::2, 0]) * 2, 2)
indices[0::2] += 1
coords = coords[indices]
return coords, z0, z1.item()
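# Example of the z1 computation above (illustrative values): for a room with
# lwh = (-2, 2, -3, 3, -1.6, 1.4) and the default z0 = 50,
# z1 = lwh[4] / lwh[5] * z0 = -1.6 / 1.4 * 50 ~= -57.14.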
vote_mask_c_up_down = None
def get_vote_mask_c_up_down(cfg, p_img):
global vote_mask_c_up_down
if vote_mask_c_up_down is None:
u, d = PerspectiveE2PP2E.makeVoteMaskStatic(cfg.MODEL.HOUGH.CLINE_TYPE, p_img.shape[-2:], p_img.device)
vote_mask_c_up_down = torch.cat([u, d], -1)
return vote_mask_c_up_down
def _cal_p_pred_2d_mask(cfg, input, img_idx, p_pred_xy_oneimage, p_pred_cud_oneimage):
p_imgs = input["p_imgs"][img_idx]
    # generate masks for the six cube faces
result_2dmask = []
for view_idx, p_img in enumerate(p_imgs):
        # mask for x lines
prob = p_pred_xy_oneimage[view_idx, :, 0]
x_mat = prob.unsqueeze(0).expand(p_img.shape[1], -1)
        # mask for y lines
prob = p_pred_xy_oneimage[view_idx, :, 1]
y_mat = prob.unsqueeze(1).expand(-1, p_img.shape[2])
        # mask for c (center) lines
def _genProb(probs):
"""
            Flatten an (angle_num, 2) tensor, whose last dim holds the c-up and c-down Hough-domain vectors, into a single vector with c-down concatenated after c-up
"""
            # return torch.cat([probs[:, i] for i in range(probs.shape[1])])  # the original definition; fully equivalent to the line below, just more convoluted
return probs.T.reshape(-1)
prob = _genProb(p_pred_cud_oneimage[view_idx])
vote_mask_c_up_down = get_vote_mask_c_up_down(cfg, p_img)
c_mat = (prob * vote_mask_c_up_down).sum(-1) / vote_mask_c_up_down.sum(-1)
result3 = torch.stack([x_mat, y_mat, c_mat])
result_2dmask.append(result3)
result_2dmask = torch.stack(result_2dmask)
return result_2dmask
def _cvt_xyc_p_pred_2d_mask_to_wallceilfloor(cfg, result_2dmask, img_hw):
    # convert the x/y/c RGB channels into wall/ceiling/floor RGB channels
zeros = result_2dmask.new_zeros(*img_hw)
wallceilfloor_2dmask = []
for view_idx, mask in enumerate(result_2dmask):
if VIEW_NAME[view_idx] == "U":
res = torch.stack([mask[2], (mask[0] + mask[1]) / 2, zeros], 0)
elif VIEW_NAME[view_idx] == "D":
res = torch.stack([mask[2], zeros, (mask[0] + mask[1]) / 2], 0)
else:
yAndC = (mask[1] + mask[2]) / 2
halfHeight = yAndC.shape[0] // 2
res = torch.stack([mask[0], torch.cat([yAndC[0:halfHeight], zeros[halfHeight:]], 0),
torch.cat([zeros[0:halfHeight], yAndC[halfHeight:]], 0)], 0)
wallceilfloor_2dmask.append(res)
wallceilfloor_2dmask = torch.stack(wallceilfloor_2dmask, 0)
return wallceilfloor_2dmask
def _cal_p_pred_emask(cfg, result_2dmask, img_hw, e_img_hw):
zeros = result_2dmask.new_zeros(result_2dmask.shape[1], *img_hw)
    # project the six face masks onto the equirectangular panorama
cube_mask = torch.cat(
[torch.cat([zeros, result_2dmask[4], zeros, zeros], dim=2),
torch.cat([*result_2dmask[[3, 0, 1, 2]]], dim=2),
torch.cat([zeros, result_2dmask[5], zeros, zeros], dim=2)], dim=1)
equal_mask = py360convert.c2e(cube_mask.permute(1, 2, 0).cpu().numpy(), *e_img_hw)
    equal_mask = torch.tensor(equal_mask, device="cpu").permute(2, 0, 1)  # after c2e, keep the result on CPU for visualization etc. instead of sending it back to the GPU
return equal_mask
def generatePred2DMask(cfg, input, output, img_idx):
if "p_preds_2dmask" not in output: output["p_preds_2dmask"] = [None] * len(input["p_imgs"])
if "p_preds_emask" not in output: output["p_preds_emask"] = [None] * len(input["p_imgs"])
if output["p_preds_2dmask"][img_idx] is not None: return
img_hw = input["p_imgs"][img_idx].shape[2:4]
e_img_hw = input["e_img"][img_idx].shape[1:3]
result_2dmask = _cal_p_pred_2d_mask(cfg, input, img_idx, torch.sigmoid(output["p_preds_xy"][img_idx]),
torch.sigmoid(output["p_preds_cud"][img_idx]))
wallceilfloor_2dmask = _cvt_xyc_p_pred_2d_mask_to_wallceilfloor(cfg, result_2dmask, img_hw)
equal_mask = _cal_p_pred_emask(cfg, wallceilfloor_2dmask, img_hw, e_img_hw)
output["p_preds_2dmask"][img_idx] = result_2dmask
output["p_preds_emask"][img_idx] = equal_mask
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cfg_file', type=str, required=True, help='specify the config for training')
parser.add_argument('--ckpt', required=True, help='checkpoint for evaluation')
parser.add_argument('--visu_count', default=20, type=int, help='visualize how many batches')
parser.add_argument('--batch_size', default=1, type=int, help='mini-batch size')
# Model related
parser.add_argument('--backbone',
default='drn38',
choices=ENCODER_RESNET + ENCODER_DENSENET + ENCODER_HOUGH,
help='backbone of the network')
parser.add_argument('--no_rnn', action='store_true', help='whether to remove rnn or not')
parser.add_argument('--no_multigpus', action='store_true', help='disable data parallel')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
device = torch.device('cuda')
dataset_valid = PerspectiveDataset(cfg, "test")
loader_valid = DataLoader(dataset_valid,
args.batch_size,
collate_fn=dataset_valid.collate,
shuffle=False,
drop_last=False,
num_workers=0,
pin_memory=True)
iterator_valid = iter(loader_valid)
net = DMHNet(cfg, args.backbone, not args.no_rnn).to(device)
if not args.no_multigpus:
net = nn.DataParallel(net) # multi-GPU
print(str(cfg.POST_PROCESS))
state_dict = pipeload(args.ckpt, map_location='cpu')["state_dict"]
net.load_state_dict(state_dict, strict=True)
net.eval()
count = 0
DBG_START = 0
for valid_idx in trange(args.visu_count, desc='PostProcess Visualization', position=2):
input = next(iterator_valid)
with torch.no_grad():
for k in input:
if isinstance(input[k], torch.Tensor):
input[k] = input[k].to(device)
_, results_dict = net(input)
for i in range(len(input["filename"])):
count += 1
if count <= DBG_START:
continue
(gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metric = postProcess(cfg, input,
results_dict,
i)
print("{:s} pred{:s} gt{:s}".format(str(metric), str(pred_lwh), str(gt_lwh)))
            # draw the layout boxes
from visualization import o3dRunVis, o3dDrawLines, o3dInitVis
vis = o3dInitVis()
o3dDrawLines(vis, gt_lines, gt_lwh, [1.0, 0.0, 0.0])
o3dDrawLines(vis, pred_lines, pred_lwh)
o3dRunVis(vis)
            # draw the corner points
from visualization import drawEqualRectCorners
drawEqualRectCorners(plt, input["e_img"][i], gt_cor_id, pred_cor_id)
plt.show()
| 44,265 | 42.060311 | 120 | py |
DMH-Net | DMH-Net-main/postprocess/GDSolver.py | import math
import torch
from torch import nn, optim
def solve(module: nn.Module, *inputs, lr=1e-2, tol=1e-4, max_iter=10000, optimizer=None, stop_tol=None, stop_range=None,
return_best=True, **kwargs):
history = None
if stop_range is not None:
assert stop_tol is not None
best_loss = None
best_state_dict = None
with torch.enable_grad():
if optimizer is None:
optimizer = optim.SGD(module.parameters(), lr=lr)
for i in range(max_iter):
result = module(*inputs, **kwargs)
            loss = torch.abs(result)  # the objective is fixed to driving the equation to 0
if return_best and (best_loss is None or loss < best_loss):
best_loss = loss.clone()
state_dict = module.state_dict()
best_state_dict = {k: state_dict[k].clone() for k in state_dict}
optimizer.zero_grad()
loss.backward()
optimizer.step()
if loss < tol:
break
if stop_range is not None:
if history is None:
history = torch.ones(stop_range, device=result.device, dtype=torch.float32) * math.inf
history[i % stop_range] = result.item()
if history.max() - history.min() < stop_tol:
break
if return_best:
        # load the best state_dict back into the module and return it
module.load_state_dict(best_state_dict)
return module
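# Minimal usage sketch (hypothetical module, not part of the original code): solve() drives
# |module(*inputs)| toward 0 by gradient descent, so a one-parameter module whose forward
# returns x**2 - 4 converges to x ~= 2 (or -2, depending on the initial value).
#
# class _SquareRoot(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.x = nn.Parameter(torch.tensor(1.0))
#     def forward(self):
#         return self.x ** 2 - 4
#
# m = solve(_SquareRoot(), lr=1e-1, tol=1e-6)
# print(m.x)  # close to 2.0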
| 1,452 | 32.022727 | 120 | py |
DMH-Net | DMH-Net-main/postprocess/noncuboid.py | import traceback
import warnings
from typing import List, Tuple
import numpy as np
import torch
from easydict import EasyDict
from torch import nn
from e2pconvert_torch.e2plabelconvert import generateOnePerspectiveLabel
from e2pconvert_torch.torch360convert import coor2uv, xyz2uv, uv2unitxyz, uv2coor
from e2plabel.e2plabelconvert import VIEW_NAME, VIEW_ARGS, VIEW_SIZE
from eval_general import test_general
from perspective_dataset import PerspectiveDataset
from postprocess.GDSolver import solve
from postprocess.LayoutNetv2 import map_coordinates_Pytorch
from postprocess.postprocess2 import generatePred2DMask, _cal_p_pred_emask, findPeaks, \
lossFunction, calMetrics_PretendDisUThenOptimIOUForV2
FILE_LIST = [
"7y3sRwLe3Va_1679b5de39e548d38ba240f2fd99cae9.png",
"7y3sRwLe3Va_1679b5de39e548d38ba240f2fd99cae9.png",
"7y3sRwLe3Va_b564162b2c7d4033bfe6ef3dfb959c9e.png",
"7y3sRwLe3Va_fdab6422162e49db822a37178ab70481.png",
"B6ByNegPMKs_0e5ba44387774783903fea2a1b8f53dd.png",
"B6ByNegPMKs_4c769d1a658d41eb995deb5b40af57a4.png",
"7y3sRwLe3Va_5c39473b25b74307858764d1a2045b9e.png",
"7y3sRwLe3Va_6376b741b50a4418b3dc3fde791c3c09.png",
]
def nonCuboidPostProcess(cfg, input, output, img_idx):
if cfg.get("VISUALIZATION", {}).get("TYPE") is None:
cfg.VISUALIZATION = EasyDict()
cfg.VISUALIZATION.TYPE = [["c", "y", "x", "e_rm", "gtlines", "text"]]
generatePred2DMask(cfg, input, output, img_idx)
mask2ds = output["p_preds_2dmask"][img_idx]
gt_cor_id = input["cor"][img_idx]
gt_cor_id_np = gt_cor_id.cpu().numpy()
cor_mask2ds = torch.stack([
mask2ds[:, 0] * mask2ds[:, 1],
mask2ds[:, 1] * mask2ds[:, 2],
mask2ds[:, 0] * mask2ds[:, 2],
], 1)
if cfg.POST_PROCESS.get("COR_IMG_CAL") is None:
cor_mask2ds = cor_mask2ds.mean(1)
elif cfg.POST_PROCESS.get("COR_IMG_CAL") == "max":
cor_mask2ds = cor_mask2ds.max(1)[0]
elif cfg.POST_PROCESS.get("COR_IMG_CAL") == "merge":
cor_mask2ds = (cor_mask2ds.sum(1) + cor_mask2ds.max(1)[0] * 2) / 5
img_hw = input["p_imgs"][img_idx].shape[2:4]
e_img_hw = input["e_img"][img_idx].shape[1:3]
cor_img = _cal_p_pred_emask(cfg, cor_mask2ds.unsqueeze(1), img_hw, e_img_hw).squeeze(0)
column_img = cor_img.max(0)[0]
    columns = []  # length-n array; its first 4, 6, 8, ... entries serve as the initialization for 4, 6, 8, ... corners
ranges = [(e_img_hw[1] // 4 * i, e_img_hw[1] // 4 * (i + 1)) for i in range(4)]
scores = [column_img[r[0]:r[1]] for r in ranges]
peakss = [findPeaks(cfg, r).to(torch.int64) for r in scores]
peakss = [r[s[r].argsort(descending=True)] for r, s in zip(peakss, scores)]
    # first pick one point from each quarter of the panorama
columns.extend([(r[0] if len(r) > 0 else r.new_tensor(e_img_hw[1] // 4 // 4)) + begin for r, (begin, _) in
zip(peakss, ranges)])
peakss_2 = torch.cat([r[1:] + begin for r, (begin, _) in zip(peakss, ranges)])
    # sort the remaining peaks by confidence
peakss_2 = peakss_2[column_img[peakss_2].argsort(descending=True)]
columns.extend(peakss_2)
columns = torch.stack(columns)
res_v2 = None
all_results = []
# for cor_num in [6]:
METHODS = []
METHODS.extend([(n, "v2") for n in cfg.POST_PROCESS.get("COMBINE", {}).get("V2", [4,6,8,10,12])])
METHODS.extend([(n, "v1") for n in cfg.POST_PROCESS.get("COMBINE", {}).get("V1", [4,6,8,10,12])])
for cor_num, method in METHODS:
        # TODO: fix length to 1 and check whether full lines or segments work better;
        # TODO: pick a loss aggregation method, e.g. sum? mean?
if method == "v0":
if len(columns) < cor_num: continue
            columns_one = columns[0:cor_num].sort()[0]  # remember: sorting here is mandatory!
cor_img_columns = cor_img[:, columns_one]
upper_y = cor_img_columns[:img_hw[0] // 2].argmax(0)
lower_y = cor_img_columns[img_hw[0] // 2:].argmax(0) + (img_hw[0] // 2)
init_cors = torch.cat(
[cor_img.new_tensor([[c, u], [c, l]]) for c, u, l in zip(columns_one, upper_y, lower_y)], 0)
# cornersToPeaks(cfg, input, img_idx, init_cors)
# pred_cor_id=init_cors
# metrics = {}
pred_cor_id, err_score = solveCorners(cfg, input, output, img_idx, init_cors, e_img_hw)
else:
if method == "v2":
if res_v2 is None:
res_v2 = generate2DFrameFromColumnPeaksV2(cfg, columns, cor_img, ranges)
res_v2_0, init_cors_all = res_v2 # init_cors现在由V2主函数返回
res_frame = generate2DFrameFromColumnPeaksV2_ChooseCorNum(res_v2_0, columns, cor_num, ranges)
if res_frame is None:
continue
choice_idx, beginFromZ = res_frame
choice_corner_idx = torch.cat([torch.stack([2 * v, 2 * v + 1]) for v in choice_idx])
init_cors = init_cors_all[choice_corner_idx]
elif method == "v1":
res_frame = generate2DFrameFromColumnPeaks(columns, cor_img, cor_num)
if res_frame is None:
continue
choice_idx, beginFromZ = res_frame
columns_one = columns[choice_idx]
cor_img_columns = cor_img[:, columns_one]
upper_y = cor_img_columns[:img_hw[0] // 2].argmax(0)
lower_y = cor_img_columns[img_hw[0] // 2:].argmax(0) + (img_hw[0] // 2)
init_cors = torch.cat(
[cor_img.new_tensor([[c, u], [c, l]]) for c, u, l in zip(columns_one, upper_y, lower_y)], 0)
# pred_cor_id = init_cors
if not cfg.POST_PROCESS.get("COMBINE", {}).get("SOLVE3", False):
pred_cor_id, err_score = solveCorners2(cfg, input, output, img_idx, init_cors, cor_img, beginFromZ,
e_img_hw)
else:
pred_cor_id, err_score = solveCorners3(cfg, input, output, img_idx, init_cors, cor_img, beginFromZ,
e_img_hw)
# cornersToPeaks(cfg, input, img_idx, pred_cor_id)
# metrics = {}
pred_cor_id_np = pred_cor_id.cpu().numpy()
one_result = {"cor_num": cor_num, "err_score": err_score, "pred_cor": pred_cor_id, "method": method}
metric = {}
        # compute metrics with HorizonNet's evaluation code
t = {}
if cfg.DATA.TYPE == "cuboid":
            assert False  # cuboid mode is not supported here; the call below is unreachable (z0/z1 are undefined)
            test(pred_cor_id_np, z0, z1, gt_cor_id_np, e_img_hw[1], e_img_hw[0], t)
elif cfg.DATA.TYPE == "general":
test_general(pred_cor_id_np, gt_cor_id_np, e_img_hw[1], e_img_hw[0], t)
else:
assert False
for k in t:
metric[k] = torch.tensor(t[k]) if not isinstance(t[k], str) else t[k]
one_result["metrics"] = metric
all_results.append(one_result)
if cfg.POST_PROCESS.get("COMBINE", {}).get("OPTIM", True):
_, (_, _, pred_cor_id_optim), metric_optim, err_score_optim = \
calMetrics_PretendDisUThenOptimIOUForV2(cfg, input, output, img_idx, True)
all_results.append({"cor_num": 4, "err_score": err_score_optim, "pred_cor": pred_cor_id_optim, "method": "optim", "metrics": metric_optim})
METRIC_KEYS=["2DIoU", "3DIoU", "rmse", "delta_1"]
err_scores = torch.tensor([one_result["err_score"] for one_result in all_results])
best_result_idx = err_scores.argmin()
best_result_cor_num = all_results[best_result_idx]["cor_num"]
pred_cor_id = all_results[best_result_idx]["pred_cor"]
metrics = {}
metrics.update(all_results[best_result_idx]["metrics"])
    # also pick the best value of each metric across all_results
metrics_all = {k: torch.stack([one_result["metrics"][k] for one_result in all_results]) for k in METRIC_KEYS}
metrics_best = {"best/" + k: v.max() for k, v in metrics_all.items()}
metrics.update(metrics_best)
metrics["pred_cor_num"] = torch.tensor(float(best_result_cor_num))
    # still print all the results, though
additional_metrics = {}
for one_result in all_results:
additional_metrics.update(
{one_result["method"] + "/" + str(one_result["cor_num"]) + "/" + k: one_result["metrics"][k] for k in one_result["metrics"]})
additional_metrics.update({one_result["method"] + "/" + str(one_result["cor_num"]) + "/err_score": one_result["err_score"]})
print(additional_metrics)
return (None, None, gt_cor_id), (None, None, pred_cor_id), metrics
def cornersToPeaks(cfg, input, img_idx, cors, isInputUv=False) -> List[List[Tuple[torch.Tensor, torch.Tensor]]]:
result = []
for view_idx, (view_name, view) in enumerate(zip(VIEW_NAME, VIEW_ARGS)):
r = generateOnePerspectiveLabel(input["e_img"][img_idx], cors, *view, VIEW_SIZE, isInputUv=isInputUv)
# input["lines"][img_idx][view_idx] = r["lines"] # TODO
peakss, lengths = PerspectiveDataset.linesToPeaksNewCore([line[3:8] for line in r["lines"]], VIEW_SIZE)
xPeaks, yPeaks, cUpPeaks, cDownPeaks = ((torch.stack(p) if len(p) > 0 else cors.new_zeros(0)) for p in peakss)
xLength, yLength, cUpLength, cDownLength = ((torch.stack(p) if len(p) > 0 else cors.new_zeros(0)) for p in
lengths)
result.append([(xPeaks, xLength), (yPeaks, yLength), (cUpPeaks, cUpLength), (cDownPeaks, cDownLength)])
return result
def solveCorners(cfg, input, output, img_idx, start_corners, e_img_hw):
"""
    Parameter definition: (n*2+1) parameters in total, where n is 4, 6, 8, ... (len(start_corners) = 2*n):
    the corners' x values, the ceiling tan values, and the ratio floor-tan / ceiling-tan.
"""
uv = coor2uv(start_corners, *e_img_hw)
xs = start_corners[::2, 0]
tanceil = torch.tan(uv[::2, 1])
tanratio = (torch.tan(uv[1::2, 1]) / torch.tan(uv[::2, 1])).mean()
class CornersSolveModule(nn.Module):
def __init__(self):
super().__init__()
self.xs = nn.Parameter(xs.clone().detach().requires_grad_(True))
self.tanceil = nn.Parameter(tanceil.clone().detach().requires_grad_(True))
self.tanratio = nn.Parameter(tanratio.clone().detach().requires_grad_(True))
def toCors(self):
tanfloor = self.tanceil * self.tanratio
tanceilfloor = []
for c, f in zip(self.tanceil, tanfloor):
tanceilfloor.extend((c, f))
vs = torch.atan(torch.stack(tanceilfloor))
coor_y = (-vs / np.pi + 0.5) * e_img_hw[0] - 0.5
cors = torch.stack([self.xs.repeat_interleave(2), coor_y], 1)
return cors
def forward(self):
cors = self.toCors()
try:
peaks = cornersToPeaks(cfg, input, img_idx, cors)
return lossFunction(output, img_idx, peaks)
            except Exception:
                traceback.print_exc()
                warnings.warn("Exception raised in CornersSolveModule forward")
return torch.tensor(20., device=cors.device, requires_grad=True)
module = CornersSolveModule().to("cpu")
module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
return module.toCors(), torch.abs(module.forward())
def generate2DFrameFromColumnPeaksV2(cfg, column_peaks, cor_img, ranges):
"""
    :param column_peaks: variable-length vector (x) of column peaks, sorted by confidence in descending order;
        its first four elements must lie exactly in the four quarters of the panorama.
    :return: None, or Tuple[Tensor<cor_num>, bool]. Tensor<cor_num> holds the **indices** of the elements of
        column_peaks to select; the bool tells whether the segment from chosen[0] to chosen[1] runs along z (front-back).
"""
    # use only the first 4 points to derive a hypothetical height, then project all peaks into xz space
column_peaks, sortedIdxs = column_peaks.sort()
oldIdxToNewIdx = torch.cat([torch.where(sortedIdxs == v)[0] for v in range(len(sortedIdxs))])
def getXz():
img_hw = cor_img.shape
columns_one = column_peaks
cor_img_columns = cor_img[:, columns_one]
upper_y = cor_img_columns[:img_hw[0] // 2].argmax(0)
lower_y = cor_img_columns[img_hw[0] // 2:].argmax(0) + (img_hw[0] // 2)
init_cors = torch.cat(
[cor_img.new_tensor([[c, u], [c, l]]) for c, u, l in zip(columns_one, upper_y, lower_y)], 0)
uv = coor2uv(init_cors, *img_hw)
tanv = torch.tan(uv[:, 1])
# xs = start_corners[::2, 0]
# tanceil = torch.tan(uv[::2, 1])
        # Step 1: compute the height and bring all points to the same height by adjusting only their v coordinates.
tanratio = tanv[0::2] / tanv[1::2]
        # !!! Use only the first 4 points passed in as support points! Note the index conversion via oldIdxToNewIdx
if cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "both":
prob_ground = map_coordinates_Pytorch(cor_img, init_cors[1::2][oldIdxToNewIdx[0:4]].T.flip(0))
the_mask = chooseByProb(cfg, prob_ground)
aim_tanratio = tanratio[oldIdxToNewIdx[0:4][the_mask]].mean()
else:
aim_tanratio = tanratio[oldIdxToNewIdx[0:4]].mean()
v_adjust_factor = torch.sqrt(tanratio / aim_tanratio)
v_adjust_vector = torch.cat([torch.stack([1 / v, v]) for v in v_adjust_factor])
tanv = tanv * v_adjust_vector
uv[:, 1] = torch.atan(tanv)
        # Step 2: build the initial frame.
        # All uv are known; project to xyz with the y value fixed to height or -1.6 to obtain x and z.
        # Then refine each point's x and z coordinates according to beginFromZ and the confidences.
xyz = uv2unitxyz(uv)
# xyz[0::2] = xyz[0::2] / xyz[0::2, 1] * height
# xyz[1::2] = xyz[1::2] / xyz[1::2, 1] * -1.6
xz = (xyz[1::2] / xyz[1::2, 1:2] * -1.6)[:, [0, 2]]
e_coor = uv2coor(uv, *cor_img.shape)
prob = map_coordinates_Pytorch(cor_img, e_coor.T.flip(0))
prob = (prob[0::2] + prob[1::2]) / 2
        # TODO question: which point coordinates should feed the next stage? The once-refined ones, or the unrefined ones?
coor = e_coor if cfg.POST_PROCESS.V2.REFINE_V_TWICE else init_cors
oldIdxToNewCornerIdx = torch.cat([torch.stack([2 * v, 2 * v + 1]) for v in oldIdxToNewIdx])
coor = coor[oldIdxToNewCornerIdx]
return xz, prob, coor
xz, prob, coor = getXz()
    # TODO question: which probability to use? The new per-point probability, or the column probability?
if cfg.POST_PROCESS.V2.FIRST_COLUMN_PROB:
column_img = cor_img.max(0)[0]
prob = column_img[column_peaks]
l = len(xz)
    # compute the direction of each segment between consecutive points
delta = xz - torch.cat([xz[1:], xz[0:1]])
absdelta = torch.abs(delta)
    lineIsZ: torch.Tensor = (absdelta[:, 0] < absdelta[:, 1])  # whether the segment from i to i+1 runs along z
    # start from the highest-probability point whose two adjacent segments run along x and z respectively
probIdx = prob.argsort(descending=True)
resultIdxs = None
for v in probIdx:
        if lineIsZ[(v - 1 + l) % l] == lineIsZ[v]: continue  # both adjacent segments share a direction: invalid point
beginIdx = v.item()
resultIdxs = [beginIdx]
        # starting from beginIdx, either traversal direction yields the same result, so always walk forward
cur = (beginIdx + 1) % l
beginFromZ = lineIsZ[beginIdx]
curDirect = beginFromZ
while cur != beginIdx:
if lineIsZ[cur] != curDirect:
resultIdxs.append(cur)
curDirect = lineIsZ[cur]
cur = (cur + 1) % l
        # validate the result: only accept it if every quarter of the panorama still contains a peak afterwards
new_choice_peaks = column_peaks[sortedIdxs[resultIdxs]]
success = True
for r in ranges:
if torch.logical_and(r[0] <= new_choice_peaks, new_choice_peaks < r[1]).sum() <= 0:
success = False
break
if success:
break
else:
resultIdxs = None
if resultIdxs is None:
        # if no feasible solution exists, fall back to running with just the first four points
if len(column_peaks) > 4:
res_v2_0, _ = generate2DFrameFromColumnPeaksV2(cfg, column_peaks[oldIdxToNewIdx][0:4], cor_img, ranges)
return res_v2_0, coor
else:
            # still no luck after recursion, so just return as-is
resultIdxs = [0, 1, 2, 3]
beginFromZ = lineIsZ[0]
return (sortedIdxs[resultIdxs], prob[resultIdxs], beginFromZ), coor
def generate2DFrameFromColumnPeaksV2_ChooseCorNum(res_v2_0, column_peaks, cor_num, ranges):
choice_idx, choice_probs, beginFromZ = res_v2_0
assert len(choice_idx) % 2 == 0 and 4 <= cor_num <= 12 and cor_num % 2 == 0
if len(choice_idx) == cor_num:
return choice_idx, beginFromZ
elif len(choice_idx) < cor_num:
return None
else:
        # remove only two points at a time, then recurse
        # find the lowest-probability segment and remove it
line_prob = choice_probs + torch.cat([choice_probs[1:], choice_probs[0:1]])
idxSeq = line_prob.argsort()
new_choice_idx, new_choice_probs, new_beginFromZ = None, None, None
        # choose the segment to delete:
for toRemoveIdx in idxSeq:
if toRemoveIdx < len(choice_idx) - 1:
                # simply splice the two remaining ends together
new_choice_idx = torch.cat([choice_idx[0:toRemoveIdx], choice_idx[toRemoveIdx + 2:]])
new_choice_probs = torch.cat([choice_probs[0:toRemoveIdx], choice_probs[toRemoveIdx + 2:]])
new_beginFromZ = beginFromZ
else:
                # keep indices 1 to l-1, and flip beginFromZ
new_choice_idx = choice_idx[1:len(choice_idx) - 1]
new_choice_probs = choice_probs[1:len(choice_idx) - 1]
new_beginFromZ = not beginFromZ
            # validate the result: only accept it if every quarter of the panorama still contains a peak afterwards
new_choice_peaks = column_peaks[new_choice_idx]
success = True
for r in ranges:
if torch.logical_and(r[0] <= new_choice_peaks, new_choice_peaks < r[1]).sum() <= 0:
success = False
break
if success:
                # if validation passes, two points were successfully removed;
                # recurse to keep removing points: a non-None return from below means success, a None means this level continues
new_res_v2_0 = (new_choice_idx, new_choice_probs, new_beginFromZ)
res = generate2DFrameFromColumnPeaksV2_ChooseCorNum(new_res_v2_0, column_peaks, cor_num, ranges)
if res is not None:
return res
        # reaching this point means recursion never produced a valid result, so give up
return None
def generate2DFrameFromColumnPeaks(column_peaks, cor_img, cor_num):
"""
    :param column_peaks: variable-length vector (x) of column peaks, sorted by confidence in descending order;
        its first four elements must lie exactly in the four quarters of the panorama.
    :return: None, or Tuple[Tensor<cor_num>, bool]. Tensor<cor_num> holds the **indices** of the elements of
        column_peaks to select; the bool tells whether the segment from chosen[0] to chosen[1] runs along z (front-back).
"""
assert 4 <= cor_num <= 12 and cor_num % 2 == 0
toChooseIdxs = list(range(4, len(column_peaks)))
chosen = list(column_peaks[0:4].sort()[0])
    beginFromZ = True  # the segment from chosen[0] to chosen[1] should run along z (front-back)
while len(chosen) < cor_num:
if len(toChooseIdxs) < 2:
return None
beforeChosenCount = len(chosen)
idx0 = toChooseIdxs[0]
for i in range(1, len(toChooseIdxs)):
idx1 = toChooseIdxs[i]
insert_pos = torch.searchsorted(column_peaks.new_tensor(chosen), column_peaks[[idx0, idx1]])
insert_pos_dis = torch.abs(insert_pos[1] - insert_pos[0])
if insert_pos_dis <= 1 or insert_pos_dis == len(chosen):
if insert_pos_dis == len(chosen):
                    # wrap-around insertion, so beginFromZ must be flipped
beginFromZ = not beginFromZ
if column_peaks[idx1] >= column_peaks[idx0]:
                    # the idx1 element inserts after the idx0 element, so insert the idx1 element first
chosen.insert(insert_pos[1], column_peaks[idx1])
chosen.insert(insert_pos[0], column_peaks[idx0])
else:
chosen.insert(insert_pos[0], column_peaks[idx0])
chosen.insert(insert_pos[1], column_peaks[idx1])
toChooseIdxs.remove(idx0)
toChooseIdxs.remove(idx1)
break
if len(chosen) - beforeChosenCount < 2:
            # the insertion attempt failed, so return None
return None
return torch.cat([torch.where(column_peaks == v)[0] for v in chosen]), beginFromZ
def chooseByProb(cfg, prob):
MIN_COUNT = 2
if cfg.POST_PROCESS.get("COR_IMG_CAL") is None:
VALUE = 0.4
elif cfg.POST_PROCESS.get("COR_IMG_CAL") == "max":
VALUE = 0.7
elif cfg.POST_PROCESS.get("COR_IMG_CAL") == "merge":
VALUE = 0.6
result = prob > VALUE
result[prob.argsort(descending=True)[0:MIN_COUNT]] = True
return result
def solveCorners2(cfg, input, output, img_idx, start_corners: torch.Tensor, prob_map: torch.Tensor,
beginFromZ: bool, e_img_hw):
uv = coor2uv(start_corners, *e_img_hw)
tanv = torch.tan(uv[:, 1])
# xs = start_corners[::2, 0]
# tanceil = torch.tan(uv[::2, 1])
    # Step 1: compute the height and bring all points to the same height by adjusting only their v coordinates.
tanratio = tanv[0::2] / tanv[1::2]
if cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "second" or cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "both":
prob_ground = map_coordinates_Pytorch(prob_map, start_corners[1::2].T.flip(0))
the_mask = chooseByProb(cfg, prob_ground)
aim_tanratio = tanratio[the_mask].mean()
else:
aim_tanratio = tanratio.mean()
    height = -1.6 * aim_tanratio  # initial height from the mean of (ceiling tan / floor tan)
v_adjust_factor = torch.sqrt(tanratio / aim_tanratio)
v_adjust_vector = torch.cat([torch.stack([1 / v, v]) for v in v_adjust_factor])
tanv = tanv * v_adjust_vector
uv[:, 1] = torch.atan(tanv)
    # Step 2: build the initial frame.
    # All uv are known; project to xyz with the y value fixed to height or -1.6 to obtain x and z.
    # Then refine each point's x and z coordinates according to beginFromZ and the confidences.
xyz = uv2unitxyz(uv)
# xyz[0::2] = xyz[0::2] / xyz[0::2, 1] * height
# xyz[1::2] = xyz[1::2] / xyz[1::2, 1] * -1.6
xz = (xyz[1::2] / xyz[1::2, 1:2] * -1.6)[:, [0, 2]]
e_coor = uv2coor(uv, *prob_map.shape)
if cfg.POST_PROCESS.V2.SECOND_START_PROB:
prob_full = map_coordinates_Pytorch(prob_map, start_corners.T.flip(0))
else:
prob_full = map_coordinates_Pytorch(prob_map, e_coor.T.flip(0))
prob = (prob_full[0::2] + prob_full[1::2]) / 2
dis = torch.zeros_like(xz[:, 0])
l = len(dis)
for i in range(l):
if cfg.POST_PROCESS.V2.STRAIGHTEN_BY_PROB:
factor = torch.stack([prob[i], prob[(i + 1) % l]])
factor = factor / factor.sum()
if cfg.POST_PROCESS.V2.STRAIGHTEN_WHEN_BETTER:
if factor[0] > 0.75:
factor = factor.new_tensor([1.0, 0.0])
elif factor[1] > 0.75:
factor = factor.new_tensor([0.0, 1.0])
else:
factor = prob.new_tensor([0.5, 0.5])
if beginFromZ:
            # xz[0] to xz[1] runs along z, so force equal x values when i=0 and store the result in dis[0]
if i % 2 == 0:
dis[i] = (torch.stack([xz[i, 0], xz[(i + 1) % l, 0]]) * factor).sum()
else:
dis[i] = (torch.stack([xz[i, 1], xz[(i + 1) % l, 1]]) * factor).sum()
else:
            # otherwise xz[0] to xz[1] runs along x, so force equal z values when i=0 and store the result at index 1
if i % 2 == 0:
dis[(i + 1) % l] = (torch.stack([xz[i, 1], xz[(i + 1) % l, 1]]) * factor).sum()
else:
dis[(i + 1) % l] = (torch.stack([xz[i, 0], xz[(i + 1) % l, 0]]) * factor).sum()
class CornersSolveModule(nn.Module):
def __init__(self):
super().__init__()
self.height = nn.Parameter(height.clone().detach().requires_grad_(True))
self.dis = nn.Parameter(dis.clone().detach().requires_grad_(True))
def toUvs(self):
cor_num = len(self.dis)
idxs = torch.arange(cor_num, device=self.dis.device)
idxs = torch.stack([idxs, (idxs + 1) % cor_num], 1)
idxs[1::2] = idxs[1::2].flip(1)
xyz = torch.cat([
torch.stack([self.dis[idxs[:, 0]], self.height.repeat(len(idxs)),
self.dis[idxs[:, 1]]], 1),
torch.stack([self.dis[idxs[:, 0]], self.dis.new_tensor(-1.6).repeat(len(idxs)),
self.dis[idxs[:, 1]]], 1),
], 0)
uv = xyz2uv(xyz)
            # sort; this step is mandatory!
seq = uv[:cor_num, 0].argsort()
seq = seq.repeat_interleave(2)
seq[1::2] += cor_num
uv = uv[seq]
return uv
def forward(self):
# return torch.tensor(20., device=self.dis.device, requires_grad=True)
uv = self.toUvs()
try:
peaks = cornersToPeaks(cfg, input, img_idx, uv, isInputUv=True)
return lossFunction(output, img_idx, peaks)
            except Exception:
                traceback.print_exc()
                warnings.warn("Exception raised in CornersSolveModule forward")
return torch.tensor(20., device=uv.device, requires_grad=True)
module = CornersSolveModule().to("cpu")
module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
uvs = module.toUvs()
return uv2coor(uvs, *e_img_hw), torch.abs(module.forward())
def solveCorners3(cfg, input, output, img_idx, start_corners: torch.Tensor, prob_map: torch.Tensor,
beginFromZ: bool, e_img_hw):
uv = coor2uv(start_corners, *e_img_hw)
tanv = torch.tan(uv[:, 1])
# xs = start_corners[::2, 0]
# tanceil = torch.tan(uv[::2, 1])
    # Step 1: compute the height and bring all points to the same height by adjusting only their v coordinates.
tanratio = tanv[0::2] / tanv[1::2]
if cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "second" or cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "both":
prob_ground = map_coordinates_Pytorch(prob_map, start_corners[1::2].T.flip(0))
the_mask = chooseByProb(cfg, prob_ground)
aim_tanratio = tanratio[the_mask].mean()
else:
aim_tanratio = tanratio.mean()
    height = -1.6 * aim_tanratio  # initial height from the mean of (ceiling tan / floor tan)
v_adjust_factor = torch.sqrt(tanratio / aim_tanratio)
v_adjust_vector = torch.cat([torch.stack([1 / v, v]) for v in v_adjust_factor])
tanv = tanv * v_adjust_vector
uv[:, 1] = torch.atan(tanv)
    # Step 2: build the initial frame.
    # All uv are known; project to xyz with the y value fixed to height or -1.6 to obtain x and z.
    # Then refine each point's x and z coordinates according to beginFromZ and the confidences.
xyz = uv2unitxyz(uv)
# xyz[0::2] = xyz[0::2] / xyz[0::2, 1] * height
# xyz[1::2] = xyz[1::2] / xyz[1::2, 1] * -1.6
xz = (xyz[1::2] / xyz[1::2, 1:2] * -1.6)[:, [0, 2]]
e_coor = uv2coor(uv, *prob_map.shape)
if cfg.POST_PROCESS.V2.SECOND_START_PROB:
prob_full = map_coordinates_Pytorch(prob_map, start_corners.T.flip(0))
else:
prob_full = map_coordinates_Pytorch(prob_map, e_coor.T.flip(0))
prob = (prob_full[0::2] + prob_full[1::2]) / 2
dis = torch.zeros_like(xz[:, 0])
l = len(dis)
for i in range(l):
if cfg.POST_PROCESS.V2.STRAIGHTEN_BY_PROB:
factor = torch.stack([prob[i], prob[(i + 1) % l]])
factor = factor / factor.sum()
if cfg.POST_PROCESS.V2.STRAIGHTEN_WHEN_BETTER:
if factor[0] > 0.75:
factor = factor.new_tensor([1.0, 0.0])
elif factor[1] > 0.75:
factor = factor.new_tensor([0.0, 1.0])
else:
factor = prob.new_tensor([0.5, 0.5])
if beginFromZ:
            # xz[0] to xz[1] runs along z, so force equal x values when i=0 and store the result in dis[0]
if i % 2 == 0:
dis[i] = (torch.stack([xz[i, 0], xz[(i + 1) % l, 0]]) * factor).sum()
else:
dis[i] = (torch.stack([xz[i, 1], xz[(i + 1) % l, 1]]) * factor).sum()
else:
            # otherwise xz[0] to xz[1] runs along x, so force equal z values when i=0 and store the result at index 1
if i % 2 == 0:
dis[(i + 1) % l] = (torch.stack([xz[i, 1], xz[(i + 1) % l, 1]]) * factor).sum()
else:
dis[(i + 1) % l] = (torch.stack([xz[i, 0], xz[(i + 1) % l, 0]]) * factor).sum()
class CornersSolveModule(nn.Module):
def __init__(self):
super().__init__()
self.height = nn.Parameter(height.clone().detach().requires_grad_(True))
self.dis = nn.Parameter(dis.clone().detach().requires_grad_(True))
def toUvs(self):
cor_num = len(self.dis)
idxs = torch.arange(cor_num, device=self.dis.device)
idxs = torch.stack([idxs, (idxs + 1) % cor_num], 1)
idxs[1::2] = idxs[1::2].flip(1)
dis = self.dis
            disU = torch.atan2(dis[0::2], dis[1::2])  # atan2(x, z)
seq = disU.argsort()
seq = (seq * 2).repeat_interleave(2)
seq[1::2] += 1
dis = dis[seq]
xyz = torch.cat([
torch.stack([dis[idxs[:, 0]], self.height.repeat(len(idxs)),
dis[idxs[:, 1]]], 1),
torch.stack([dis[idxs[:, 0]], dis.new_tensor(-1.6).repeat(len(idxs)),
dis[idxs[:, 1]]], 1),
], 0)
uv = xyz2uv(xyz)
            # instead of sorting, keep the order already present in uv and just start from the point with the smallest u
            startPlace = uv[0:cor_num, 0].argmin()
seq = torch.arange(startPlace, startPlace + cor_num, device=uv.device) % cor_num
seq = seq.repeat_interleave(2)
seq[1::2] += cor_num
uv = uv[seq]
return uv
def forward(self):
# return torch.tensor(20., device=self.dis.device, requires_grad=True)
uv = self.toUvs()
try:
peaks = cornersToPeaks(cfg, input, img_idx, uv, isInputUv=True)
return lossFunction(output, img_idx, peaks)
            except Exception:
                traceback.print_exc()
                warnings.warn("Exception raised in CornersSolveModule forward")
return torch.tensor(20., device=uv.device, requires_grad=True)
module = CornersSolveModule().to("cpu")
module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
uvs = module.toUvs()
return uv2coor(uvs, *e_img_hw), torch.abs(module.forward())
| 29,019 | 43.509202 | 147 | py |
DMH-Net | DMH-Net-main/misc/utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import io
import subprocess
def group_weight(module):
    # Group module parameters into two groups:
    # one that needs weight_decay and one that doesn't
group_decay = []
group_no_decay = []
for m in module.modules():
if isinstance(m, nn.Linear):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.modules.conv._ConvNd):
group_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.modules.batchnorm._BatchNorm):
if m.weight is not None:
group_no_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
elif isinstance(m, nn.GroupNorm):
if m.weight is not None:
group_no_decay.append(m.weight)
if m.bias is not None:
group_no_decay.append(m.bias)
assert len(list(
module.parameters())) == len(group_decay) + len(group_no_decay)
return [
dict(params=group_decay),
dict(params=group_no_decay, weight_decay=.0)
]
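# Typical usage (a sketch with assumed hyperparameters, not from the original code):
# the param groups returned here can be passed straight to an optimizer so that only
# weights, not biases/norm parameters, receive weight decay:
#
# optimizer = torch.optim.Adam(group_weight(model), lr=1e-4, weight_decay=1e-5)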
def adjust_learning_rate(optimizer, args):
if args.cur_iter < args.warmup_iters:
frac = args.cur_iter / args.warmup_iters
step = args.lr - args.warmup_lr
args.running_lr = args.warmup_lr + step * frac
else:
frac = (float(args.cur_iter) - args.warmup_iters) / (
args.max_iters - args.warmup_iters)
scale_running_lr = max((1. - frac), 0.)**args.lr_pow
args.running_lr = args.lr * scale_running_lr
for param_group in optimizer.param_groups:
param_group['lr'] = args.running_lr
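# Worked example (illustrative numbers): with warmup_lr=1e-5, lr=1e-4, warmup_iters=1000,
# max_iters=10000 and lr_pow=0.9, the learning rate ramps linearly from 1e-5 to 1e-4 over
# the first 1000 iterations, then decays as 1e-4 * (1 - frac)**0.9 down to 0 at iteration 10000.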
def save_model(net, path, args):
state_dict = OrderedDict({
'args': args.__dict__,
'kwargs': {
'backbone': net.module.backbone,
'use_rnn': net.module.use_rnn,
},
'state_dict': net.state_dict(),
})
pipesave(state_dict, path)
def load_trained_model(Net, path, *args):
state_dict = pipeload(path, map_location='cpu')
net = Net(*args)
net.load_state_dict(state_dict['state_dict'], strict=False)
return net
def pipeload(filepath: str, **kwargs):
if not filepath.startswith("hdfs://"):
return torch.load(filepath, **kwargs)
with hopen(filepath, "rb") as reader:
accessor = io.BytesIO(reader.read())
state_dict = torch.load(accessor, **kwargs)
del accessor
return state_dict
def pipesave(obj, filepath: str, **kwargs):
if filepath.startswith("hdfs://"):
with hopen(filepath, "wb") as writer:
torch.save(obj, writer, **kwargs)
else:
torch.save(obj, filepath, **kwargs)
HADOOP_BIN = 'PATH=/usr/bin:$PATH hdfs'
from contextlib import contextmanager
@contextmanager
def hopen(hdfs_path, mode="r"):
pipe = None
if mode.startswith("r"):
pipe = subprocess.Popen(
"{} dfs -text {}".format(HADOOP_BIN, hdfs_path), shell=True, stdout=subprocess.PIPE)
yield pipe.stdout
pipe.stdout.close()
pipe.wait()
return
if mode == "wa":
pipe = subprocess.Popen(
"{} dfs -appendToFile - {}".format(HADOOP_BIN, hdfs_path), shell=True, stdin=subprocess.PIPE)
yield pipe.stdin
pipe.stdin.close()
pipe.wait()
return
if mode.startswith("w"):
pipe = subprocess.Popen(
"{} dfs -put -f - {}".format(HADOOP_BIN, hdfs_path), shell=True, stdin=subprocess.PIPE)
yield pipe.stdin
pipe.stdin.close()
pipe.wait()
return
raise RuntimeError("unsupported io mode: {}".format(mode))
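# Usage sketch (hypothetical paths): hopen wraps the hdfs CLI in a pipe, so reads and
# writes stream through subprocess stdout/stdin:
#
# with hopen("hdfs://some/dir/ckpt.pth", "rb") as f:
#     data = f.read()
# with hopen("hdfs://some/dir/out.bin", "wb") as f:
#     f.write(data)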
def js_div(p_output, q_output, get_softmax=True):
"""
Function that measures JS divergence between target and output logits:
"""
if get_softmax:
p_output = F.softmax(p_output, -1)
q_output = F.softmax(q_output, -1)
    log_mean_output = ((p_output + q_output) / 2).log()
ploss = F.kl_div(log_mean_output, p_output, reduction='batchmean')
qloss = F.kl_div(log_mean_output, q_output, reduction='batchmean')
    return (ploss + qloss) / 2
 | 4,291 | 32.271318 | 105 | py |
lld-public | lld-public-master/infer.py | import numpy as np
import cv2
import torch
import torch.nn.functional as F
import pylbd
import matplotlib.pyplot as plt
import torch.nn as nn
class FeatureEncoder(nn.Module):
def initialize_l2(self, g=0):
if g==0:
g = 4
if self.depth == 2:
g = 2
if self.depth == 3:
g = 1
        print('initializing depth=' + str(self.depth) + ' g=' + str(g))
# g = 1
self.downsample_init = nn.Upsample(scale_factor=0.5, mode='bilinear')
if self.is_color:
self.conv1 = nn.Conv2d(3, 8 * g, kernel_size=3, stride=1, padding=1)
else:
self.conv1 = nn.Conv2d(1, 8 * g, kernel_size=3, stride=1, padding=1)
self.batch1 = nn.BatchNorm2d(8 * g)
self.conv2 = nn.Conv2d(8 * g, 8 * g, kernel_size=3, stride=1, padding=1)
self.batch2 = nn.BatchNorm2d(8 * g)
if self.depth > 0:
self.conv3 = nn.Conv2d(8 * g, 16 * g, kernel_size=3, stride=2, padding=1) # 1/2
else:
self.conv3 = nn.Conv2d(8 * g, 16 * g, kernel_size=3, stride=1, padding=1) # 1/2
self.batch3 = nn.BatchNorm2d(16 * g)
self.conv4 = nn.Conv2d(16 * g, 16 * g, kernel_size=3, stride=1, padding=1)
self.batch4 = nn.BatchNorm2d(16 * g)
k = 16 * g
if self.depth >= 2:
self.conv5 = nn.Conv2d(16 * g, 32 * g, kernel_size=3, stride=2, padding=1) # 1/4
self.batch5 = nn.BatchNorm2d(32 * g)
self.conv6 = nn.Conv2d(32 * g, 32 * g, kernel_size=3, padding=1)
self.batch6 = nn.BatchNorm2d(32 * g)
k = 32*g
if self.depth == 3:
self.conv61 = nn.Conv2d(32 * g, 64 * g, kernel_size=3, stride=2, padding=1) # 1/4
self.batch61 = nn.BatchNorm2d(64 * g)
self.conv62 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)
self.batch62 = nn.BatchNorm2d(64 * g)
k = 64*g
if self.is_skip:
self.convu1 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1) #
self.batchu1 = nn.BatchNorm2d(64 * g)
# 1/4
self.convu2 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1) # 1/4
self.batchu2 = nn.BatchNorm2d(64 * g)
self.convu3 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1) # 1/4
self.batchu3 = nn.BatchNorm2d(64 * g)
# self.conv7 = nn.Conv2d(k, k, kernel_size=8, padding=0)
self.conv7 = nn.Conv2d(k, k, kernel_size=7, padding=3)
self.batch7 = nn.BatchNorm2d(k)
self.dropout = nn.Dropout2d()
if self.is_learnable:
self.deconv_64 = nn.ConvTranspose2d(k, k, stride=8, kernel_size=3)
self.deconv_32 = nn.ConvTranspose2d(k, k, stride=4, kernel_size=3)
self.deconv_16 = nn.ConvTranspose2d(k, k, stride=2, kernel_size=3)
else:
self.deconv_64 = nn.Upsample(scale_factor=8, mode='bilinear')
self.deconv_32 = nn.Upsample(scale_factor=4, mode='bilinear')
self.deconv_16 = nn.Upsample(scale_factor=2, mode='bilinear')
self.deconv_final = nn.Upsample(scale_factor=self.upscale_factor, mode='bilinear')
def __init__(self, is_cuda, is_color=False, upscale_factor=1, is_pyramid=True, depth=2, g=0,
is_learnable=False, is_skip = False):
super(FeatureEncoder, self).__init__()
        self.margins = -1 * torch.ones(2).float()
        self.final_size = -1 * torch.ones(2).float()
        if is_cuda:
            self.margins, self.final_size = self.margins.cuda(), self.final_size.cuda()
self.is_learnable = is_learnable
self.depth = depth
self.upscale_factor = upscale_factor
self.is_color = is_color
self.is_skip = is_skip
        print('my net init start full')
self.initialize_l2(g)
# self._initialize_weights()
        print('init L2 done')
self.is_cuda = is_cuda
self.scale = 1.0
# self.is_plain = is_plain
self.is_pyramid = is_pyramid
self.is_test_mode = False
def forward(self, x):
# print 'start '+str(x.shape)
init_shape = x.shape
x = F.relu(self.batch1(self.conv1(x)), inplace=True)
# print 'b1 ' + str(x.shape)
x = F.relu(self.batch2(self.conv2(x)), inplace=True)
x2 = x
# print 'b2 ' + str(x.shape)
x = F.relu(self.batch3(self.conv3(x)), inplace=True)
# print 'b3 ' + str(x.shape)
x = F.relu(self.batch4(self.conv4(x)), inplace=True)
if (self.depth == 0):
if self.is_skip:
w = x2.shape[3]
h = x2.shape[2]
xm_1 = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
xm_2 = x[:, x2.shape[1]:, :, :]
xm = torch.stack([xm_1, xm_2], dim=1)
xm = xm.view(x.shape)
x = xm
# x[:, 0:x2.shape[1], 0:h, 0:w] = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
x = self.conv7(x)
return x
# print 'b4 ' + str(x.shape)
if (self.depth == 1):
x = self.batch7(self.conv7(x))
# print 'b7 ' + str(x.shape)
x = self.deconv_16(x)
if self.is_skip:
w = x2.shape[3]
h = x2.shape[2]
x[:, 0:x2.shape[1], 0:h, 0:w] = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
x = self.deconv_final(x)
return x
x4 = x
x = F.relu(self.batch5(self.conv5(x)), inplace=True)
# print 'b5 ' + str(x.shape)
x = F.relu(self.batch6(self.conv6(x)), inplace=True)
# print 'b6 ' + str(x.shape)
if self.depth == 2:
x = self.batch7(self.conv7(x))
# print 'b7 ' + str(x.shape)
x = self.deconv_32(x)
x = self.deconv_final(x)
return x
x8 = x
x = self.batch61(self.conv61(x))
x = self.batch62(self.conv62(x))
x = self.batch7(self.conv7(x))
# print 'b7 ' + str(x.shape)
if self.is_skip:
# print('before ups '+str(x.shape))
x = F.upsample_bilinear(x, x8.shape[2:])
# print('after ups ' + str(x.shape))
x = x + x8.repeat(1, 2, 1, 1)
x = self.batchu1(self.convu1(x))
x = F.upsample_bilinear(x, x4.shape[2:])
# print(x4.shape)
x4r = x4.repeat(1, 4, 1, 1)
# print(x4r.shape)
x = x + x4r
x = self.batchu2(self.convu2(x))
x = F.upsample_bilinear(x, init_shape[2:])
x = x + x2.repeat(1,8,1,1)
x = self.batchu3(self.convu3(x))
else:
x = self.deconv_64(x)
x = self.deconv_final(x)
return x
def sample_descriptors(x, lines, w_img, h_img):
w_map = x.shape[3]
h_map = x.shape[2]
# print(x.shape)
m_x = (w_img-w_map)/2.0
m_y = (h_img-h_map)/2.0
# print(w_img)
# print(m_x)
lines_x_flat = lines[:, :, :, 0].contiguous().view(-1)
good_lines = (lines_x_flat > 0).nonzero()
# print(str(len(good_lines))+ ' ' + str(lines_x_flat.shape[0]))
lines_x_flat[good_lines] = lines_x_flat[good_lines] + m_x*torch.ones_like(lines_x_flat[good_lines])
lines_x_flat[good_lines] = 2.0/w_map*lines_x_flat[good_lines] - torch.ones_like(lines_x_flat[good_lines])
lines[:, :, :, 0] = lines_x_flat.view(lines[:, :, :, 0].shape)
lines_y_flat = lines[:, :, :, 1].contiguous().view(-1)
good_lines = (lines_y_flat > 0).nonzero()
lines_y_flat[good_lines] = lines_y_flat[good_lines] + m_y*torch.ones_like(lines_y_flat[good_lines])
lines_y_flat[good_lines] = 2.0 / h_map * lines_y_flat[good_lines] - torch.ones_like(lines_y_flat[good_lines])
lines[:, :, :, 1] = lines_y_flat.view(lines[:, :, :, 1].shape)
lds = F.grid_sample(x, lines, mode='bilinear', padding_mode='border')
avg_lds = torch.sum(lds, 3)
avg_lds = F.normalize(avg_lds, p=2, dim=1)
return avg_lds
def prepare_grid_numpy_vec(ld, s, pt_per_line):
cur_line_num = ld.shape[1]
if cur_line_num == 0:
return []
x_s = ld[0:2,:]
x_e = ld[2:4, :]
pts_lst = []
for j in range(0, pt_per_line):
c = (1.0+2*j)/(2*pt_per_line)
coordmat = s*(x_s*(1-c)+ x_e*c)# - m_rep
pts_lst.append(coordmat.transpose(1,0))
return np.stack(pts_lst, axis=2).transpose(0,2,1)
def prepare_input(img, lines, is_cuda):
img = img.reshape(1, 1, img.shape[0], img.shape[1])
img = np.asarray(img).astype(float)
img = torch.from_numpy(img).float()
    # columns 7:11 of the detection matrix hold the segment endpoints (x_s, y_s, x_e, y_e),
    # as consumed by prepare_grid_numpy_vec below
    lines = lines[:, 7:11].transpose()
lines = prepare_grid_numpy_vec(lines, 1.0, pt_per_line=5)
lines = lines.reshape(1, lines.shape[0], lines.shape[1], lines.shape[2])
lines = torch.from_numpy(lines).float()
if is_cuda:
img = img.cuda()
lines = lines.cuda()
return img, lines
def match_using_lbd(img1, img2, n_oct, factor):
gray1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
lbd1, lines1 = pylbd.detect_and_describe(gray1, n_oct, factor)
lbd2, lines2 = pylbd.detect_and_describe(gray2, n_oct, factor)
matches = pylbd.match_lbd_descriptors(lbd1, lbd2)
debug_lbd = pylbd.visualize_line_matching(img1, lines1, img2, lines2, matches, True)
return debug_lbd
def test_line_matching(weights_path, is_cuda):
n_oct = 1
factor = 1.44
img1 = cv2.imread('kitti_8_left.png')
img2 = cv2.imread('kitti_8_right.png')
w_img = img1.shape[1]
h_img = img1.shape[0]
debug_lbd = match_using_lbd(img1, img2, n_oct, factor)
cv2.imwrite('test_lbd.png', debug_lbd)
plt.figure('LBD')
plt.imshow(debug_lbd)
    encoder_net = FeatureEncoder(is_cuda=is_cuda, is_color=False, is_pyramid=False, depth=3, g=0)
if is_cuda:
encoder_net = encoder_net.cuda()
    checkpoint = torch.load(weights_path, map_location=None if is_cuda else 'cpu')
encoder_net.load_state_dict(checkpoint['state_dict'])
gray1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
lines1 = pylbd.detect_edlines(gray1, n_oct, factor)
img1_torch, lines1_torch = prepare_input(gray1, lines1, is_cuda)
y = encoder_net(img1_torch)
d1 = sample_descriptors(y, lines1_torch, w_img, h_img).detach().cpu().numpy()
gray2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
lines2 = pylbd.detect_edlines(gray2, n_oct, factor)
img2_torch, lines2_torch = prepare_input(gray2, lines2, is_cuda)
y2 = encoder_net(img2_torch)
d2 = sample_descriptors(y2, lines2_torch, w_img, h_img).detach().cpu().numpy()
nd1 = d1.shape[2]
nd2 = d2.shape[2]
match_lst = []
d1 = d1.reshape(64, -1).transpose()
d2 = d2.reshape(64, -1).transpose()
for i in range(0, nd1):
ld1 = d1[i]
min_dist = 1e10
best_match = 0
for j in range(0, nd2):
ld2 = d2[j]
dist = 2 - ld1.dot(ld2)
if dist < min_dist:
min_dist = dist
best_match = j
match_result = [i, best_match, 0, 1]
match_lst.append(match_result)
matches_lld = np.asarray(match_lst).astype(int)
debug_lld_img = pylbd.visualize_line_matching(img1, lines1, img2, lines2, matches_lld, True)
cv2.imwrite('test_lld.png', debug_lld_img)
plt.figure('LLD')
plt.imshow(debug_lld_img)
plt.show()
test_line_matching(weights_path='/storage/projects/lld/1.pyh.tar', is_cuda=False)
 | 11,351 | 37.744027 | 113 | py |
lld-public | lld-public-master/train.py | import torch
import data.batched as ba
import cnn.net_multibatch as nmb
import torch.optim as optim
import os
import train.multibatch_trainer as mbt
import tqdm
from torch.autograd import Variable
import numpy as np
dir_path = '../traindata/'
ptnum = 5
is_noisy = False
def get_net():
return nmb.FeatureEncoder(is_cuda=True, is_color=False, is_pyramid = False, depth=3, g=0).cuda()
def train_multibatch(se=-1):
train_loader, test_loader, test_loader_2 = ba.get_combined_training_v2(pt_per_line=ptnum, is_noisy=is_noisy)
encoder_net = get_net()
optimizer = optim.Adam(encoder_net.parameters(), lr = 1e-4)
mbt.run_training(dir_path, train_loader, test_loader, encoder_net, optimizer, is_triplet=False, start_epoch=se, vdseqid='7')
def eval_multibatch(ep_id):
train_loader, test_loader, test_loader2 = ba.get_combined_training(pt_per_line=ptnum)
encoder_net = get_net()
mbt.run_validation(dir_path, test_loader2, encoder_net, ep_id, is_triplet=False, vqseqid='9', is_save=True)
def test_with_descriptors_hetero(ep_id=0):
encoder_net = get_net()
kitti_ids = [8, 9, 10]
seq_ids_kitti = np.arange(14, 17)
euroc_ids = [2, 4, 6]
seq_ids_euroc = np.arange(17, 20)
seq_map = {}
for i in range(0, len(kitti_ids)):
seq_map[seq_ids_kitti[i]] = ('kitti', kitti_ids[i])
for i in range(0, len(euroc_ids)):
seq_map[seq_ids_euroc[i]] = ('euroc', euroc_ids[i])
mbt.run_test_heterogen(dir_path, encoder_net, seq_map, ep_id=ep_id, is_triplet=False, do_savedesc=True,
pt_per_line=ptnum)
train_multibatch()
eval_multibatch(1)
test_with_descriptors_hetero(1)
| 1,661 | 29.777778 | 128 | py |
lld-public | lld-public-master/cnn/net_multibatch.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import tqdm
from torch.autograd import Variable
import torch.optim as optim
import time
import sklearn.metrics as metrics
import numpy as np
def compute_distances(x, pos_inds, neg_mask):
#x: b x C x N
#pos_mask: b x n_p, pos_inds: b x n_p, neg_mask b x N x np
pos_mask = (pos_inds >= 0).float()
pos_anti_mask = (pos_inds<0)
b, np = pos_inds.shape
C = x.shape[1]
pos_inds[pos_anti_mask] = 0
pos_inds_exp = pos_inds.view(b, 1, np).expand(-1, C, -1)
# x_pos = torch.index_select(x, 2, pos_inds_exp)
x_pos = torch.gather(x, 2, pos_inds_exp)
#x_pos b x C x n_p
x_pos1 = x_pos[1:, :, :].permute(0, 2, 1).contiguous().view(-1, C)
x_pos0 = x_pos[0]
x_pos_from = x_pos0.view(1, C, np).expand(b-1, -1, -1).permute(0, 2, 1).contiguous().view(-1, C)
# pos_mask_flat = pos_mask[1:,:].reshape(-1)
pos_mask_part = pos_mask[1:,:]
dists_pos = F.pairwise_distance(x_pos_from, x_pos1).view(b-1, np)
# d_pos = torch.sum(F.pairwise_distance(x_pos_from, x_pos1) * pos_mask_flat)
N = x.shape[2]
x_pos_from = x_pos0.view(1, 1, C, np).expand(b-1, N, -1, -1).permute(0, 1, 3, 2).contiguous().view(-1, C)
x_to = x[1:].permute(0, 2, 1).view(b-1, N, 1, C).expand(-1, -1, np, -1).contiguous().view(-1, C)
# print x_pos_from.shape
# print x_to.shape
# print b
# print N
dists_flat = F.pairwise_distance(x_pos_from, x_to)
dists = dists_flat.view(b-1, N, np)
neg_mask_part = neg_mask[1:].float()
max_dist = 2.0
dists_neg = dists * neg_mask_part + max_dist * (torch.ones_like(neg_mask_part) - neg_mask_part)
#dists_pos: b-1 x np
#pos_mask_part: b-1 x np
#dists_neg: b-1 x N x np
#neg_mask_part: b-1 x N x np
return dists_pos, pos_mask_part, dists_neg, neg_mask_part
def compute_trip_loss(x, pos_inds, neg_mask, m):
dists_pos, pos_mask_part, dists_neg, neg_mask_part = compute_distances(x, pos_inds, neg_mask)
# d_pos_flat = dists_pos.view(-1)[(pos_mask_part>0).view(-1)].detach().cpu().numpy()
# print('+ dists '+str(np.min(d_pos_flat))+' '+str(np.max(d_pos_flat)))
d_pos = torch.sum(dists_pos * pos_mask_part, 0)
# d_neg_flat = dists_neg.view(-1)[(neg_mask_part>0).view(-1)].detach().cpu().numpy()
# print('- dists ' + str(np.min(d_neg_flat)) + ' ' + str(np.max(d_neg_flat)))
# print(dists_neg.shape)
d1, a1 = torch.min(dists_neg, dim=0)
# print (d1.shape)
d_neg, a_neg = torch.min(d1, 0)
# print(d_neg.shape)
# m_var = torch.ones(1).cuda() * m
d_neg_diff = m - d_neg
# d_nd_np = d_neg_diff.detach().cpu().numpy()
# print('min neg dists '+str(np.min(d_nd_np))+' : '+str(np.max(d_nd_np)))
# print(d_neg_diff.shape)
mask = (d_neg_diff>=0).float()
# print((mask * d_neg_diff).shape)
# print(d_pos.shape)
return torch.sum(mask * d_neg_diff + d_pos)
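# In summary (a restatement for clarity, not original code): for each positive anchor p this
# returns sum_p [ sum_over_frames d(x0_p, xb_p) + max(0, m - min_over_negatives d(x0_p, x_neg)) ],
# i.e. the summed positive distances plus a hinge on the hardest negative across all frames.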
def compute_hn_loss(x, pos_inds, neg_mask, m):
dists_pos, pos_mask_part, dists_neg, neg_mask_part = compute_distances(x, pos_inds, neg_mask)
# d_pos_flat = dists_pos.view(-1)[(pos_mask_part>0).view(-1)].detach().cpu().numpy()
# print('+ dists '+str(np.min(d_pos_flat))+' '+str(np.max(d_pos_flat)))
d_pos = torch.sum(dists_pos * pos_mask_part, 0)
# d_neg_flat = dists_neg.view(-1)[(neg_mask_part>0).view(-1)].detach().cpu().numpy()
# print('- dists ' + str(np.min(d_neg_flat)) + ' ' + str(np.max(d_neg_flat)))
# print(dists_neg.shape)
d1, a1 = torch.min(dists_neg, dim=0)
# print (d1.shape)
d_neg, a_neg = torch.min(d1, 0)
d_diff = d_pos - d_neg
# print(d_neg.shape)
# m_var = torch.ones(1).cuda() * m
d_shifted = d_diff + m
# d_nd_np = d_neg_diff.detach().cpu().numpy()
# print('min neg dists '+str(np.min(d_nd_np))+' : '+str(np.max(d_nd_np)))
# print(d_neg_diff.shape)
mask = (d_shifted>=0).float()
# print((mask * d_neg_diff).shape)
# print(d_pos.shape)
return torch.sum(mask * d_shifted)
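# In summary (a restatement for clarity, not original code): unlike compute_trip_loss, this is
# the classic hardest-negative triplet hinge sum_p max(0, d_pos_p - d_neg_p + m), where d_pos is
# summed over frames and d_neg is the hardest negative.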
def compute_distvecs(x, pos_inds, neg_mask):
dists_pos, pos_mask_part, dists_neg, neg_mask_part = compute_distances(x, pos_inds, neg_mask)
d_pos = np.zeros(0)
d_neg = np.zeros(0)
pos_mask_flat = pos_mask_part.view(-1)
dists_pos_flat = dists_pos.view(-1)
if pos_mask_flat.shape[0] > 0:
pos_inds = pos_mask_flat>0
d_pos = dists_pos_flat.view(-1)
if dists_pos_flat.shape[0]>0:
d_pos = d_pos[pos_inds]
if d_pos.shape[0]>0:
d_pos = d_pos.detach().cpu().numpy()
neg_mask_flat = dists_neg.view(-1)
if neg_mask_flat.shape[0] > 0:
neg_inds = neg_mask_flat<2.0
dists_neg_flat = dists_neg.view(-1)
if dists_neg_flat.shape[0]>0:
d_neg_t = dists_neg_flat[neg_inds]#.detach().cpu().numpy()
if d_neg_t.shape[0]>0:
d_neg = d_neg_t.detach().cpu().numpy()
return d_pos, d_neg
def sample_descriptors(x, lines, w_img, h_img):
w_map = x.shape[3]
h_map = x.shape[2]
# print(x.shape)
m_x = (w_img-w_map)/2.0
m_y = (h_img-h_map)/2.0
# print(w_img)
# print(m_x)
    lines_x_flat = lines[:, :, :, 0].contiguous().view(-1)
good_lines = (lines_x_flat > 0).nonzero()
# print(str(len(good_lines))+ ' ' + str(lines_x_flat.shape[0]))
lines_x_flat[good_lines] = lines_x_flat[good_lines] + m_x*torch.ones_like(lines_x_flat[good_lines])
lines_x_flat[good_lines] = 2.0/w_map*lines_x_flat[good_lines] - torch.ones_like(lines_x_flat[good_lines])
lines[:, :, :, 0] = lines_x_flat.view(lines[:, :, :, 0].shape)
    lines_y_flat = lines[:, :, :, 1].contiguous().view(-1)
good_lines = (lines_y_flat > 0).nonzero()
lines_y_flat[good_lines] = lines_y_flat[good_lines] + m_y*torch.ones_like(lines_y_flat[good_lines])
lines_y_flat[good_lines] = 2.0 / h_map * lines_y_flat[good_lines] - torch.ones_like(lines_y_flat[good_lines])
lines[:, :, :, 1] = lines_y_flat.view(lines[:, :, :, 1].shape)
lds = F.grid_sample(x, lines, mode='bilinear', padding_mode='border')
avg_lds = torch.sum(lds, 3)
avg_lds = F.normalize(avg_lds, p=2, dim=1)
return avg_lds
class FeatureEncoder(nn.Module):
def initialize_l2(self, g=0):
if g==0:
g = 4
if self.depth == 2:
g = 2
if self.depth == 3:
g = 1
        print('initializing depth=' + str(self.depth) + ' g=' + str(g))
# g = 1
self.downsample_init = nn.Upsample(scale_factor=0.5, mode='bilinear')
if self.is_color:
self.conv1 = nn.Conv2d(3, 8 * g, kernel_size=3, stride=1, padding=1)
else:
self.conv1 = nn.Conv2d(1, 8 * g, kernel_size=3, stride=1, padding=1)
self.batch1 = nn.BatchNorm2d(8 * g)
self.conv2 = nn.Conv2d(8 * g, 8 * g, kernel_size=3, stride=1, padding=1)
self.batch2 = nn.BatchNorm2d(8 * g)
if self.depth > 0:
self.conv3 = nn.Conv2d(8 * g, 16 * g, kernel_size=3, stride=2, padding=1) # 1/2
else:
self.conv3 = nn.Conv2d(8 * g, 16 * g, kernel_size=3, stride=1, padding=1) # 1/2
self.batch3 = nn.BatchNorm2d(16 * g)
self.conv4 = nn.Conv2d(16 * g, 16 * g, kernel_size=3, stride=1, padding=1)
self.batch4 = nn.BatchNorm2d(16 * g)
k = 16 * g
if self.depth >= 2:
self.conv5 = nn.Conv2d(16 * g, 32 * g, kernel_size=3, stride=2, padding=1) # 1/4
self.batch5 = nn.BatchNorm2d(32 * g)
self.conv6 = nn.Conv2d(32 * g, 32 * g, kernel_size=3, padding=1)
self.batch6 = nn.BatchNorm2d(32 * g)
k = 32*g
if self.depth == 3:
self.conv61 = nn.Conv2d(32 * g, 64 * g, kernel_size=3, stride=2, padding=1) # 1/4
self.batch61 = nn.BatchNorm2d(64 * g)
self.conv62 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)
self.batch62 = nn.BatchNorm2d(64 * g)
k = 64*g
if self.is_skip:
self.convu1 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1) #
self.batchu1 = nn.BatchNorm2d(64 * g)
# 1/4
self.convu2 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1) # 1/4
self.batchu2 = nn.BatchNorm2d(64 * g)
self.convu3 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1) # 1/4
self.batchu3 = nn.BatchNorm2d(64 * g)
# self.conv7 = nn.Conv2d(k, k, kernel_size=8, padding=0)
self.conv7 = nn.Conv2d(k, k, kernel_size=7, padding=3)
self.batch7 = nn.BatchNorm2d(k)
self.dropout = nn.Dropout2d()
if self.is_learnable:
self.deconv_64 = nn.ConvTranspose2d(k, k, stride=8, kernel_size=3)
self.deconv_32 = nn.ConvTranspose2d(k, k, stride=4, kernel_size=3)
self.deconv_16 = nn.ConvTranspose2d(k, k, stride=2, kernel_size=3)
else:
self.deconv_64 = nn.Upsample(scale_factor=8, mode='bilinear')
self.deconv_32 = nn.Upsample(scale_factor=4, mode='bilinear')
self.deconv_16 = nn.Upsample(scale_factor=2, mode='bilinear')
self.deconv_final = nn.Upsample(scale_factor=self.upscale_factor, mode='bilinear')
def __init__(self, is_cuda, is_color=False, upscale_factor=1, is_pyramid=True, depth=2, g=0,
is_learnable=False, is_skip = False):
super(FeatureEncoder, self).__init__()
self.margins = -1*torch.ones(2).float().cuda()
self.final_size = -1 * torch.ones(2).float().cuda()
self.is_learnable = is_learnable
self.depth = depth
self.upscale_factor = upscale_factor
self.is_color = is_color
self.is_skip = is_skip
        print('my net init start full')
        self.initialize_l2(g)
        # self._initialize_weights()
        print('init L2 done')
self.is_cuda = is_cuda
self.scale = 1.0
# self.is_plain = is_plain
self.is_pyramid = is_pyramid
self.is_test_mode = False
def forward(self, x):
# print 'start '+str(x.shape)
init_shape = x.shape
x = F.relu(self.batch1(self.conv1(x)), inplace=True)
# print 'b1 ' + str(x.shape)
x = F.relu(self.batch2(self.conv2(x)), inplace=True)
x2 = x
# print 'b2 ' + str(x.shape)
x = F.relu(self.batch3(self.conv3(x)), inplace=True)
# print 'b3 ' + str(x.shape)
x = F.relu(self.batch4(self.conv4(x)), inplace=True)
if (self.depth == 0):
if self.is_skip:
w = x2.shape[3]
h = x2.shape[2]
xm_1 = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
xm_2 = x[:, x2.shape[1]:, :, :]
xm = torch.stack([xm_1, xm_2], dim=1)
xm = xm.view(x.shape)
x = xm
# x[:, 0:x2.shape[1], 0:h, 0:w] = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
x = self.conv7(x)
return x
# print 'b4 ' + str(x.shape)
if (self.depth == 1):
x = self.batch7(self.conv7(x))
# print 'b7 ' + str(x.shape)
x = self.deconv_16(x)
if self.is_skip:
w = x2.shape[3]
h = x2.shape[2]
x[:, 0:x2.shape[1], 0:h, 0:w] = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
x = self.deconv_final(x)
return x
x4 = x
x = F.relu(self.batch5(self.conv5(x)), inplace=True)
# print 'b5 ' + str(x.shape)
x = F.relu(self.batch6(self.conv6(x)), inplace=True)
# print 'b6 ' + str(x.shape)
if self.depth == 2:
x = self.batch7(self.conv7(x))
# print 'b7 ' + str(x.shape)
x = self.deconv_32(x)
x = self.deconv_final(x)
return x
x8 = x
x = self.batch61(self.conv61(x))
x = self.batch62(self.conv62(x))
x = self.batch7(self.conv7(x))
# print 'b7 ' + str(x.shape)
if self.is_skip:
# print('before ups '+str(x.shape))
x = F.upsample_bilinear(x, x8.shape[2:])
# print('after ups ' + str(x.shape))
x = x + x8.repeat(1, 2, 1, 1)
x = self.batchu1(self.convu1(x))
x = F.upsample_bilinear(x, x4.shape[2:])
# print(x4.shape)
x4r = x4.repeat(1, 4, 1, 1)
# print(x4r.shape)
x = x + x4r
x = self.batchu2(self.convu2(x))
x = F.upsample_bilinear(x, init_shape[2:])
x = x + x2.repeat(1,8,1,1)
x = self.batchu3(self.convu3(x))
else:
x = self.deconv_64(x)
x = self.deconv_final(x)
return x
def net_train(loader, encoder_net, optimizer, is_triplet=True):
encoder_net.train()
avg_loss = 0
for data in tqdm.tqdm(loader):
x = Variable(data['images'].cuda(), requires_grad=False)
lines = Variable(data['lines'].cuda(), requires_grad=False)
# torch.cuda.synchronize()
# torch.cuda.synchronize()
# t0 = time.time()
y = encoder_net(x)
# torch.cuda.synchronize()
# torch.cuda.synchronize()
# t1 = time.time()
w_img = x[0].shape[2]
h_img = x[0].shape[1]
d = sample_descriptors(y, lines, w_img, h_img)
# d_np = d.detach().cpu().numpy()
# print(np.linalg.norm(d_np[0,:,0]))
pos_inds = Variable(data['positives'].cuda(), requires_grad=False)
neg_mask = Variable(data['negatives'].cuda(), requires_grad=False)
l = 0
if is_triplet:
l = compute_trip_loss(d, pos_inds, neg_mask, m=0.5)
else:
l = compute_hn_loss(d, pos_inds, neg_mask, m=0.5)
avg_loss += l.detach().cpu().numpy()
optimizer.zero_grad()
# print(l.detach().cpu().numpy())
l.backward()
# torch.cuda.synchronize()
# torch.cuda.synchronize()
# t2 = time.time()
# print 'fwd time '+str(t1 - t0)
# print 'bwd time ' + str(t2 - t1)
optimizer.step()
print('avg train loss '+str(avg_loss/len(loader)))
return encoder_net
def get_ap(all_pos, all_neg):
all_pos = np.concatenate(tuple(all_pos), axis=0)
y_true_pos = np.ones(all_pos.shape[0])
all_neg = np.concatenate(tuple(all_neg), axis=0)
y_true_neg = np.zeros(all_neg.shape[0])
y_true = np.concatenate((y_true_pos, y_true_neg), axis=0)
y_est = 1.0 - 0.5 * np.concatenate((all_pos, all_neg), axis=0)
curr_ap = metrics.average_precision_score(y_true, y_est)
return curr_ap
def net_test(loader, encoder_net, is_triplet = True, save_descs=False, save_folder = ''):
encoder_net.eval()
with torch.no_grad():
all_pos = []
all_neg = []
cnt = 0
avg_loss = 0
avg_time = 0
for data in tqdm.tqdm(loader):
x = Variable(data['images'].cuda(), requires_grad=False)
if len(x) == 0:
continue
# print('bs='+str(len(x)))
lines = Variable(data['lines'].cuda(), requires_grad=False)
torch.cuda.synchronize()
torch.cuda.synchronize()
t0 = time.time()
y = encoder_net(x)
w_img = x[0].shape[2]
h_img = x[0].shape[1]
d = sample_descriptors(y, lines, w_img, h_img)
torch.cuda.synchronize()
torch.cuda.synchronize()
t1 = time.time()
# print(str(t1 - t0) + ' sec')
l = 0
pos_inds = Variable(data['positives'].cuda(), requires_grad=False)
neg_mask = Variable(data['negatives'].cuda(), requires_grad=False)
if is_triplet:
l = compute_trip_loss(d, pos_inds, neg_mask, m=0.5)
else:
l = compute_hn_loss(d, pos_inds, neg_mask, m=0.5)
l = l.detach().cpu().numpy()
avg_loss += l
avg_time += t1-t0
# pos_inds = Variable(data['positives'].cuda(), requires_grad=False)
# neg_mask = Variable(data['negatives'].cuda(), requires_grad=False)
# d_pos, d_neg = compute_distvecs(d, pos_inds, neg_mask)
# cnt += 1
# all_pos.append(d_pos)
# all_neg.append(d_neg)
if save_descs:
img_ids = data['image_ids']
pair_ids = data['pair_ids']
ln_lens = data['ln_lens']
for ii in range(0, len(img_ids)):
img_id = img_ids[ii]
pair_id = pair_ids[ii]
f_out = open(save_folder + '/' + str(img_id) + '_' + str(pair_id) + '.txt', 'w')
lines_np = lines.detach().cpu().numpy()
descs_np = d.detach().cpu().numpy()
for lind in range(0, ln_lens[ii]):
# if lines_np[ii, lind, 0, 0] == 0 and lines_np[ii, lind, -1, 0] == 0:
# # print('breaking the cycle iid ' +str(img_id))
# # print(descs_np[ii, 0, lind:lind+10])
# break
dcur = descs_np[ii, :, lind].reshape(-1)
for j in range(0, len(dcur)):
f_out.write(str(dcur[j]) + ' ')
                        f_out.write('\n')
                    f_out.close()
# if cnt % 100 == 0:
# curr_ap = get_ap(all_pos, all_neg)
# print(str(cnt/len(loader))+' ' + str(curr_ap))
# curr_ap = get_ap(all_pos, all_neg)
# print('final: ' + str(curr_ap)+' loss '+str(avg_loss/len(loader))+' time '+str(avg_time/len(loader)))
# return curr_ap
return 0
def net_inference(loader, encoder_net, save_descs=False, save_folder = ''):
encoder_net.eval()
with torch.no_grad():
all_pos = []
all_neg = []
cnt = 0
avg_loss = 0
avg_time = 0
for data in tqdm.tqdm(loader):
x = Variable(data['images'].cuda(), requires_grad=False)
if len(x) == 0:
continue
# print('bs='+str(len(x)))
lines = Variable(data['lines'].cuda(), requires_grad=False)
torch.cuda.synchronize()
torch.cuda.synchronize()
t0 = time.time()
y = encoder_net(x)
w_img = x[0].shape[2]
h_img = x[0].shape[1]
d = sample_descriptors(y, lines, w_img, h_img)
torch.cuda.synchronize()
torch.cuda.synchronize()
t1 = time.time()
avg_time += t1-t0
if save_descs:
img_ids = data['image_ids']
pair_ids = data['pair_ids']
ln_lens = data['ln_lens']
for ii in range(0, len(img_ids)):
img_id = img_ids[ii]
pair_id = pair_ids[ii]
f_out = open(save_folder + '/' + str(img_id) + '_' + str(pair_id) + '.txt', 'w')
descs_np = d.detach().cpu().numpy()
for lind in range(0, ln_lens[ii]):
dcur = descs_np[ii, :, lind].reshape(-1)
for j in range(0, len(dcur)):
f_out.write(str(dcur[j]) + ' ')
                        f_out.write('\n')
                    f_out.close()
return 0
| 19,314 | 38.418367 | 113 | py |
lld-public | lld-public-master/train/multibatch_trainer.py | import torch
import os
import time
import numpy as np
import data.batched as ba
import torch.optim as optim
import cnn.net_multibatch as nmb
def compose_batch(batch):
batch = batch[0]
n = len(batch[0])
ims = np.asarray(batch[0]).astype(float)
ims = torch.from_numpy(ims).float()
lines = batch[1]
ln_lens = [l.shape[0] for l in lines]
ln_max = np.max(np.asarray(ln_lens))
pt_num = lines[0].shape[2]
lines_torch = torch.zeros(n, ln_max, pt_num, 2)
for i in range(0, n):
lines_torch[i, 0:lines[i].shape[0], :, :] = torch.from_numpy(lines[i].astype(float)).float()
negs = batch[2]
poss = batch[3]
n_pos = len(poss[0])
max_neg = max([max([len(neg_for_pos) for neg_for_pos in neg]) for neg in negs])
neg_t = -1*torch.ones(n, n_pos, max_neg)
for i in range(0, len(negs)):
for j in range(0, len(negs[i])):
neg_for_pos = negs[i][j]
if len(neg_for_pos)>0:
                neg_t[i][j][0:len(neg_for_pos)] = torch.from_numpy(np.asarray(neg_for_pos).astype(np.int64))
pos_t = -1*torch.ones(n, n_pos)
for i in range(0, len(poss)):
p = poss[i]
for j in range(0, len(p)):
if len(p[j]) == 1:
pos_t[i, j] = p[j][0]
if len(batch) == 4:
return ims, lines_torch, neg_t, poss # , batch[0][4], batch[0][5]
else:
return ims, lines_torch, neg_t, poss, batch[4], batch[5], batch[6]
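# Collation sketch (hypothetical sizes): with n = 2 images holding 30 and 50 lines, each sampled
# at pt_num = 5 points, compose_batch zero-pads the lines into a (2, 50, 5, 2) tensor; neg_t is a
# (n, n_pos, max_neg) tensor padded with -1 where an anchor has fewer negatives, and pos_t holds
# one matched line index per track (or -1 when the track has no match in that image).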
def run_training(dir_path, train_loader, test_loader, encoder_net, optimizer, start_epoch=-1, is_triplet = True, vdseqid='7'):
if start_epoch >= 0:
checkpoint = torch.load(dir_path + '/' + str(start_epoch) + '.pyh.tar')
encoder_net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
        print('loaded epoch ' + str(start_epoch))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
for ep_id in range(start_epoch + 1, 8):
# nmb.net_test(train_loader, encoder_net)
encoder_net = nmb.net_train(train_loader, encoder_net, optimizer, is_triplet=is_triplet)
valdesc_folder = dir_path+"/val_descs_"+str(ep_id)+"/"+vdseqid+'/'
if not os.path.exists(valdesc_folder):
os.makedirs(valdesc_folder)
nmb.net_test(test_loader, encoder_net, save_descs=True, save_folder=valdesc_folder)
torch.save({
'epoch': ep_id,
'state_dict': encoder_net.state_dict(),
'optimizer': optimizer.state_dict(),
}, dir_path + '/' + str(ep_id) + '.pyh.tar')
def run_validation(dir_path, test_loader, encoder_net, ep_id, is_triplet = True, is_save=False, vqseqid=''):
# for ep_id in range(start_epoch + 1, 10):
checkpoint = torch.load(dir_path + '/' + str(ep_id) + '.pyh.tar')
encoder_net.load_state_dict(checkpoint['state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer'])
    print('loaded epoch ' + str(ep_id))
val_descs_save_path = dir_path+'/val_descs_'+str(ep_id)+'/'+vqseqid+'/'
if not os.path.exists(val_descs_save_path):
os.makedirs(val_descs_save_path)
curr_ap = nmb.net_test(test_loader, encoder_net, is_triplet=is_triplet, save_descs=is_save, save_folder=val_descs_save_path )
rep_txt = open(dir_path + '/report.txt', 'a')
rep_txt.write(str(ep_id) + ':' + str(curr_ap) + '\n')
def run_test_heterogen(dir_path, encoder_net, seq_map, ep_id=0, is_triplet = True, do_savedesc=True, pt_per_line=5, n_lim=6):
checkpoint = torch.load(dir_path + '/' + str(ep_id) + '.pyh.tar')
encoder_net.load_state_dict(checkpoint['state_dict'])
    print('loaded epoch ' + str(ep_id))
for seq_id in seq_map:
seq_type, seq_save_code = seq_map[seq_id]
test_loader = ba.get_combined_test(seq_id, pt_per_line, n_lim)
if seq_type == 'kitti':
save_dir = dir_path + '/descs' + '/' + str(seq_save_code) + '/'
else:
save_dir = dir_path + '/descs_euroc' + '/' + str(seq_save_code) + '/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
nmb.net_test(test_loader, encoder_net, is_triplet = is_triplet, save_descs=do_savedesc, save_folder = save_dir)
def run_inference(dir_path, encoder_net, ep_id=0, do_savedesc=True):
checkpoint = torch.load(dir_path + '/' + str(ep_id) + '.pyh.tar')
encoder_net.load_state_dict(checkpoint['state_dict'])
    print('loaded epoch ' + str(ep_id))
inf_loader = ba.get_loader()
nmb.net_inference(inf_loader, encoder_net, save_descs=do_savedesc, save_folder=dir_path+'/inf_descs')
| 4,564 | 41.268519 | 129 | py |
lld-public | lld-public-master/data/line_sampler.py | import numpy as np
import torch
import time
def prepare_line_grid(lines_pair, margins, s, map_size, is_plain, pt_per_line):
line_grid = prepare_grid_plain(lines_pair, margins, s, is_plain, pt_per_line)
for i in range(0, 2):
line_grid[:, :, :, i] -= 0.5 * map_size[i]
line_grid[:, :, :, i] /= 0.5 * map_size[i]
return line_grid
def sample_line(linedata, line_grid, i, li, s, margins, is_plain, pt_per_line):
segs = []
seg_num = 2
if is_plain:
seg_num = 1
for si in range(0, seg_num):
seg = linedata[4 * si: 4 * si + 4]
if np.linalg.norm(seg) > 0:
segs.append(seg)
if len(segs) == 0:
return
seg_main = segs[0]
x_s = np.float32(seg_main[0:2])
x_e = np.float32(seg_main[2:4])
dx = x_e - x_s
pt_step = 1.0 / pt_per_line * dx
# print 'line points start '+str(x_s) + ' end ' + str(x_e) + ' step ' + str(pt_step) + ' per line ' + str(self.pt_per_line)
for pti in range(0, pt_per_line):
pt_x = x_s + (0.5 + pti) * pt_step
line_grid[i, li, pti, :] = s * pt_x - margins
def prepare_grid_vectorized_numpy(lines_pair, margins, s, pt_per_line):
line_num = np.max([lines_pair[0].shape[1], lines_pair[1].shape[1]])
line_grid = np.zeros((2, line_num, pt_per_line, 2))
margins_vec = np.asarray(margins).reshape((2, 1))
for i in range(0, 2):
ld = lines_pair[i]
cur_line_num = ld.shape[1]
if cur_line_num == 0:
continue
x_s = ld[0:2,:]
x_e = ld[2:4, :]
for j in range(0, pt_per_line):
c = (1.0+2*j)/(2*pt_per_line)
m_rep = np.tile(margins_vec, (1, x_s.shape[1]))
line_grid[i, 0:cur_line_num, j, :] = np.transpose(s*(x_s*(1-c)+ x_e*c) - m_rep, (1,0))
return line_grid
def prepare_grid_numpy_vec(ld, s, pt_per_line):
coordmats_lst = []
cur_line_num = ld.shape[1]
if cur_line_num == 0:
return []
x_s = ld[0:2,:]
x_e = ld[2:4, :]
pts_lst = []
for j in range(0, pt_per_line):
t0 = time.time()
c = (1.0+2*j)/(2*pt_per_line)
# m_rep = margins.view(2,1).expand(-1, x_s.shape[1])
coordmat = s*(x_s*(1-c)+ x_e*c)# - m_rep
# line_grid[i, 0:cur_line_num, j, :] = coordmat.permute(1,0)
pts_lst.append(coordmat.transpose(1,0))
t1= time.time()
# print 'point compute takes '+str(t1-t0)
return np.stack(pts_lst, axis=2).transpose(0,2,1)
# allpts = torch.stack(pts_lst, dim = 2).permute(0,2,1)
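# Sampling sketch: every segment is sampled at the midpoints of pt_per_line equal sub-intervals,
# c = (1 + 2j) / (2 * pt_per_line). For pt_per_line = 5 this gives c in {0.1, 0.3, 0.5, 0.7, 0.9},
# i.e. points strictly inside the segment, never on the endpoints themselves.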
def prepare_grid_vectorized(lines_pair, margins, s, pt_per_line):
# line_num = torch.max([lines_pair[0].shape[1], lines_pair[1].shape[1]])
# line_num = lines_pair.shape[2]
t0 = time.time()
# line_grid = torch.zeros((2, line_num, pt_per_line, 2)).float().cuda()
t1 = time.time()
# print 'line grid takes '+str(t1-t0)
coordmats_lst = []
for i in range(0, 2):
ld = lines_pair[i]
cur_line_num = ld.shape[1]
if cur_line_num == 0:
continue
x_s = ld[0:2,:]
x_e = ld[2:4, :]
pts_lst = []
for j in range(0, pt_per_line):
t0 = time.time()
c = (1.0+2*j)/(2*pt_per_line)
# m_rep = margins.view(2,1).expand(-1, x_s.shape[1])
coordmat = s*(x_s*(1-c)+ x_e*c)# - m_rep
# line_grid[i, 0:cur_line_num, j, :] = coordmat.permute(1,0)
pts_lst.append(coordmat.permute(1,0))
t1= time.time()
# print 'point compute takes '+str(t1-t0)
allpts = torch.stack(pts_lst, dim = 2).permute(0,2,1)
coordmats_lst.append(allpts)
line_grid = torch.stack(coordmats_lst, dim=0)
return line_grid
def prepare_grid_plain(lines_pair, margins, s, is_plain, pt_per_line):
line_num = np.max([lines_pair[0].shape[1], lines_pair[1].shape[1]])
line_grid = np.zeros((2, line_num, pt_per_line, 2))
for i in range(0, 2):
for li in range(0, lines_pair[i].shape[1]):
linedata = lines_pair[i][:, li]
sample_line(linedata, line_grid, i, li, s, margins, is_plain, pt_per_line)
return line_grid
| 4,132 | 35.901786 | 127 | py |
lld-public | lld-public-master/data/batched.py | import os
import cv2
from torch.utils.data import Dataset
import numpy as np
import sys
import random
import torch
import line_sampler
def add_noise(im):
return im.astype(float) + 30*np.random.randn(im.shape[0], im.shape[1])
def get_image_id(call_id, f1):
mtd = 5
main_id = 5 * call_id
pair_id = -1
img_id = -1
if f1 < 4:
pair_id = np.mod(f1, 2)
img_id = np.floor(f1 / 2) + main_id + 5
return int(img_id), pair_id
else:
fm = np.floor((f1 - 4) / 2)
cnt = 0
for j in range(1, mtd + 1):
for k in range(-1, 2, 2):
i = main_id + 5 + k * j
if (i == main_id + 5 or i == main_id + 6):
continue
if cnt == fm:
img_id = i
pair_id = np.mod(f1, 2)
return int(img_id), pair_id
cnt += 1
return -1, -1
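# Worked example (following the code above): for call_id = 0 the anchor images are 5 and 6.
# f1 in {0, 1} maps to image 5 (pair ids 0/1), f1 in {2, 3} maps to image 6. For f1 >= 4 the
# frames alternate around the anchor pair: f1 = 4, 5 -> image 4, f1 = 6, 7 -> image 3,
# f1 = 8, 9 -> image 7, and so on out to a maximal temporal distance of mtd = 5.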
def compose_batch(batch):
# print('Composer!')
batch = batch[0]
n = len(batch['images'])
ims = np.asarray(batch['images']).astype(float)
# print('image range')
# print(np.max(ims))
# print(np.min(ims))
ims = torch.from_numpy(ims).float()
lines = batch['lines']
ln_lens = [l.shape[0] for l in lines]
if len(ln_lens)>0:
ln_max = np.max(np.asarray(ln_lens))
pt_num = lines[0].shape[1]
else:
ln_max = 0
pt_num = 0
n = 0
lines_torch = torch.zeros(n, ln_max, pt_num, 2)
for i in range(0, n):
lines_torch[i, 0:lines[i].shape[0], :, :] = torch.from_numpy(lines[i].astype(float)).float()
negs = batch['negatives']
poss = batch['positives']
if n > 0:
n_pos = len(poss[0])
max_neg = max([ni.shape[0] for ni in negs])
else:
max_neg = 0
n_pos = 0
neg_mask = -1*torch.ones(n, max_neg, n_pos).long()
if len(negs)>0:
        for i, neg_i in enumerate(negs):
            neg_mask[i][0:neg_i.shape[0]] = torch.from_numpy(neg_i).long()
pos_t = -1*torch.ones(n, n_pos).long()
for i in range(0, len(poss)):
p = poss[i]
for j in range(0, len(p)):
if len(p[j]) == 1:
pos_t[i, j] = p[j][0]
fin_dict = batch
fin_dict['images'] = ims
fin_dict['lines'] = lines_torch
fin_dict['negatives'] = neg_mask
fin_dict['positives'] = pos_t
fin_dict['ln_lens'] = ln_lens
return fin_dict
def compose_infer_batch(batch):
n = len(batch)
ims = [b['images'] for b in batch]
ims = np.asarray(ims).astype(float)
ims = torch.from_numpy(ims).float()
lines = [b['lines'] for b in batch]
ln_lens = [l.shape[0] for l in lines]
if len(ln_lens)>0:
ln_max = np.max(np.asarray(ln_lens))
pt_num = lines[0].shape[1]
else:
ln_max = 0
pt_num = 0
n = 0
lines_torch = torch.zeros(n, ln_max, pt_num, 2)
# print(len(lines))
for i in range(0, n):
lines_torch[i, 0:lines[i].shape[0], :, :] = torch.from_numpy(lines[i].astype(float)).float()
fin_dict = {}
fin_dict['images'] = ims
fin_dict['lines'] = lines_torch
fin_dict['ln_lens'] = ln_lens
fin_dict['image_ids'] = [b['image_ids'] for b in batch]
fin_dict['pair_ids'] = [b['pair_ids'] for b in batch]
return fin_dict
def normalize_lines_for_gridsampler(im, li):
w = im.shape[2]
h = im.shape[1]
l = li.astype(float)
# print ('min - max x before ' + str(np.min(l[0, :]))+' ' + str(np.max(l[0, :])))
l[0, :] = (l[0, :] - w / 2.0) / (w / 2.0)
# print ('min - max x after ' + str(np.min(l[0, :])) + ' ' + str(np.max(l[0, :])))
# print ('2 min - max x before ' + str(np.min(l[2, :])) + ' ' + str(np.max(l[2, :])))
l[2, :] = (l[2, :] - w / 2.0) / (w / 2.0)
# print ('2 min - max x after ' + str(np.min(l[2, :])) + ' ' + str(np.max(l[2, :])))
l[1, :] = (l[1, :] - h / 2.0) / (h / 2.0)
l[3, :] = (l[3, :] - h / 2.0) / (h / 2.0)
    return l
#Dataset description v1 (07.2018)
#lines: 5 x N matrix of ushort, with N number of lines in the image, and the rows are start_x, start_y, end_x, end_y, octave
#matching: N x M of uchar, where N is the number of detections in the frame, M is the number of tracks. Values are: 0 - not a match, 1 - possible match, 2 - detection from track
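# Example encoding (hypothetical values): with 3 detections and 2 tracks,
# matching = [[  2,   0],
#             [255,   2],
#             [255, 255]]
# would mean detection 0 belongs to track 0, detection 1 belongs to track 1, and 255 marks a
# detection that is a safe negative for that track. Note that compose_frame_data below tests for
# 255 in careful mode, so the 0/1 values in the v1 description above seem to have been superseded.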
class DatasetBatchIndexer:
def prepare_pyramid_listed(self, im):
ims = []
for i in range(0, self.pyramid_levels):
ims.append(im)
im = cv2.resize(im, (int(np.floor(1.0/self.resize_factor*im.shape[1]+0.5)), int(np.floor(1.0/self.resize_factor*im.shape[0]+0.5))))
return ims
def read_batch(self, data):
        frame_num = data.shape[0] // 8
line_num = data.shape[1]
# batch_data = np.zeros((2, 2, 2, line_num, frame_num), dtype=np.uint16)
line_lst = []
for li in range(0, line_num):
line_projs = []
for fi in range(0, frame_num):
dets = []
for di in range(0, 2): # detection index
seg = data[8 * fi + 4 * di: 8 * fi + 4 * di + 4, li]
if np.linalg.norm(seg) > 0:
dets.append(seg)
line_projs.append(dets)
line_lst.append(line_projs)
return line_lst
# for pti in range(0, 2): #pt start - end
# for ci in range(0, 2): #coordinate x-y
# batch_data[ci, pti, di, li, fi] = data[8*fi + 4*di + 2*pti + ci, li]
# return batch_data
def read_negatives(self, data):
neg_data = []
        for fi in range(0, data.shape[0] // 8):
frame_lines = []
i = 0
while i < data.shape[1] and np.linalg.norm(data[:, i]) > 0:
line_seq = []
for di in range(0, 2):
if np.linalg.norm(data[8 * fi + 4 * di: 8 * fi + 4 * di + 4, i]) == 0:
continue
pt_start = data[8 * fi + 4 * di: 8 * fi + 4 * di + 2, i]
pt_end = data[8 * fi + 4 * di + 2:8 * fi + 4 * di + 4, i]
seg_data = data[8 * fi + 4 * di: 8 * fi + 4 * di + 4, i]
# line_seq.append([pt_start, pt_end])
line_seq.append(seg_data)
frame_lines.append(line_seq)
i += 1
neg_data.append(frame_lines)
return neg_data
def read_exceptions(self, data):
        fnum = data.shape[0] // 10
batch_exc = []
for fi in range(0, fnum):
lnum = data.shape[1]
frame_exc = []
for li in range(0, lnum):
ei = 0
line_excs = []
while ei < 10 and data[10 * fi + ei, li] > 0:
line_excs.append(data[10 * fi + ei, li])
ei += 1
frame_exc.append(line_excs)
batch_exc.append(frame_exc)
return batch_exc
def __init__(self, kitti_root_dir, root_dir, seq_start=-1, seq_end=-1, seq_inds = [], is_color = False, is_pyramid = False, is_careful = True,
is_report_dist = False, pt_per_line = 5, do_sample_points = True, is_add_noise=False):
self.kitti_root_dir = kitti_root_dir
self.root_dir = root_dir
self.do_sample_points = do_sample_points
self.is_color = is_color
self.is_pyramid = is_pyramid
self.is_careful = is_careful
self.is_report_dist = is_report_dist
self.pyramid_levels = 4
self.resize_factor = 1.44
self.pt_per_line = pt_per_line
self.is_add_noise = is_add_noise
self.positives = {}
self.negatives = {}
self.exceptions = {}
self.exp_nums = {}
self.exp_num = 0
if len(seq_inds) == 0:
seq_inds = np.arange(seq_start, seq_end+1)
self.seq_inds = seq_inds
for si in range(0, len(seq_inds)):
seq_id = seq_inds[si]
seq_dir = root_dir + '/' + str(seq_id) + '/'
seq_data = {}
neg_data = {}
exc_data = {}
cnt = 0
flist = os.listdir(seq_dir)
self.exp_nums[si] = len(flist)
self.exp_num += len(flist)
def compose_frame_data(self, seq_id, call_id, f1):
pref = self.root_dir + '/' + str(seq_id) + '/' + str(call_id * 5) + '/' + str(f1)
line_data = cv2.imread(pref + '_l.png', -1)
if line_data is None:
line_data = np.zeros((0,0))
matches = cv2.imread(pref + '_m.png', -1)
if matches is None:
matches = np.zeros((0, 0))
negs_all = []
pos_all = []
for i in range(0, matches.shape[1]):
if self.is_careful:
negs = np.nonzero(matches[:, i] == 255)[0]
else:
negs = np.nonzero(matches[:, i] != 2)[0]
negs_all.append(negs)
poss = np.nonzero(matches[:, i] == 2)[0]
# print poss
pos_all.append(poss)
return line_data, negs_all, pos_all
def compose_frame_data_w_mask(self, seq_id, call_id, f1):
pref = self.root_dir + '/' + str(seq_id) + '/' + str(call_id * 5) + '/' + str(f1)
line_data = cv2.imread(pref + '_l.png', -1)
if line_data is None:
line_data = np.zeros((0,0))
matches = cv2.imread(pref + '_m.png', -1)
if matches is None:
matches = np.zeros((0, 0))
negs_all = []
pos_all = []
        negs = (matches == 255).astype(np.int64)  # N x n_p
for i in range(0, matches.shape[1]):
poss = np.nonzero(matches[:, i] == 2)[0]
pos_all.append(poss)
return line_data, negs, pos_all
def get_label(self, i, tl):
s = str(i)
while len(s) < tl:
s = '0' + s
return s
def get_image_gs(self, call_id, seq_id, f1):
iid, pid = get_image_id(call_id, f1)
# self.img_ids.append(iid)
# self.pair_ids.append(pid)
if iid < 0 or pid < 0:
            print('error loading image')
if self.is_color:
pid = pid+2
lbl = self.kitti_root_dir + '/' + self.get_label(seq_id, 2) + '/image_' + str(pid) + '/' + \
self.get_label(iid,6) + '.png'
if self.is_color:
return cv2.imread(lbl)
else:
return cv2.imread(lbl, 0)
def format_image(self, im1):
im_size = [0,0]
lu = [0,0]
ims1 = []
if self.is_pyramid:
im_shape = im1.shape
ims1 = self.prepare_pyramid_listed(im1)
total_width = 0
for im in ims1:
total_width += im.shape[1]
lus1 = []
x = 0
for i in range(0, len(ims1)):
height = ims1[i].shape[0]
width = ims1[i].shape[1]
im_size = [height, width]
lu = [0, x]
x += width
else:
if self.is_color:
im1 = np.transpose(im1, (2, 0, 1))
else:
im1 = im1.reshape((1, im1.shape[0], im1.shape[1]))
im_size = im1.shape[1:3]
lu = [0, 0]
ims1 = im1
return ims1, im_size, lu
def format_images(self, im1, im2):
ims = []
im_sizes = []
lus = []
if self.is_pyramid:
im_shape = im1.shape
ims1 = self.prepare_pyramid_listed(im1)
ims2 = self.prepare_pyramid_listed(im2)
total_width = 0
for im in ims1:
total_width += im.shape[1]
ims = np.zeros((2, im1.shape[0], total_width), dtype=np.uint8)
lus1 = []
lus2 = []
x = 0
for i in range(0, len(ims1)):
height = ims1[i].shape[0]
width = ims1[i].shape[1]
ims[0, 0:height, x:x+width] = ims1[i]
ims[1, 0:height, x:x+width] = ims2[i]
im_sizes.append([height, width])
lus1.append([0, x])
lus2.append([0, x])
x += width
ims = np.asarray(ims)
ims = ims.reshape((2, 1, ims.shape[1], ims.shape[2]))
lus = [lus1, lus2]
else:
ims = [im1, im2]
ims = np.asarray(ims)
if self.is_color:
ims = np.transpose(ims, (0, 3, 1, 2))
else:
ims = ims.reshape((2,1,ims.shape[1], ims.shape[2]))
im_sizes.append(im1.shape[0:2])
lus = [[0,0], [0,0]]
return ims, im_sizes, lus
def format_lines_for_image_pyr(self, l, lus, im_sizes):
for i in range(0, l.shape[1]):
oct_ind = l[4, i]
if oct_ind > 0:
lu = lus[oct_ind]
k_h = float(im_sizes[oct_ind][0]) / im_sizes[0][0]
k_w = float(im_sizes[oct_ind][1]) / im_sizes[0][1]
l[0, i] = l[0, i] * k_w + lu[1]
l[2, i] = l[2, i] * k_w + lu[1]
l[1, i] = l[1, i] * k_h + lu[0]
l[3, i] = l[3, i] * k_h + lu[0]
return l
def format_lines(self, l_lst, im_sizes, lus):
if self.is_pyramid:
for ii in range(0,2):
l = l_lst[ii]
l_lst[ii] = self.format_lines_for_image_pyr(l, lus[ii], im_sizes)
return l_lst
else:
return l_lst
def normalize_points_for_gridsampler(self, im, li):
w = im.shape[2]
h = im.shape[1]
l = li.astype(float)
l[:, 0, :] = (l[:, 0, :] - w / 2.0) / (w / 2.0)
l[:, 1, :] = (l[:, 1, :] - h / 2.0) / (h / 2.0)
# print ('min - max x before ' + str(np.min(l[0, :]))+' ' + str(np.max(l[0, :])))
return l
# if do_sample_lines and not line_data.shape[0] == 0:
# def form_neg_mask(self,all_negs):
#
def get_multi_frame_batch(self, si, call_id, f_lst):
seq_id = self.seq_inds[si]
all_negs = []
all_pos = []
all_lines = []
self.img_ids = []
self.pair_ids = []
ims = []
ds = []
for f in f_lst:
iid, pid = get_image_id(call_id, f)
self.img_ids.append(iid)
self.pair_ids.append(pid)
line_data, negs_1, pos_1 = self.compose_frame_data_w_mask(seq_id, call_id, f)
if len(pos_1)==0:
all_negs.append([])
all_pos.append(None)
all_lines.append([])
ims.append([])
ds.append(-1)
continue
im1 = self.get_image_gs(call_id, seq_id, f)
if self.is_add_noise:
im1 = add_noise(im1)
im1, im_size1, lu1 = self.format_image(im1)
if self.do_sample_points:
line_data = line_sampler.prepare_grid_numpy_vec(line_data, 1.0, self.pt_per_line)
line_data = self.format_lines(line_data, im_size1, lu1)
# line_data = self.normalize_points_for_gridsampler(im1, line_data)
else:
# line_data = self.normalize_lines_for_gridsampler(im1, line_data)
line_data = self.format_lines(line_data, im_size1, lu1)
all_negs.append(negs_1)
all_pos.append(pos_1)
all_lines.append(line_data)
ims.append(im1)
ds.append(f)
# all_inds = f_lst
# print ('we sampled '+str(len(all_inds)))
# print(all_inds)
# rand_inds = np.random.choice(inds, n_lim-1, replace=False)
# print(rand_inds)
# all_inds = list(np.concatenate([np.zeros((1), dtype=int), rand_inds]))
# def subfilter(lst, inds):
# print inds
# return [lst[i] for i in inds]
fin_dict = {}
fin_dict['images'] = ims
# print(len(fin_dict['images']))
fin_dict['lines'] = all_lines
fin_dict['negatives'] = all_negs
fin_dict['positives'] = all_pos
# if self.is_report_dist:
fin_dict['distances'] = ds
fin_dict['image_ids'] = self.img_ids
fin_dict['pair_ids'] = self.pair_ids
#filter empty images
p_fin = []
neg_fin = []
lines_fin = []
ims_fin = []
iids_fin = []
pids_fin = []
ds_fin = []
for cnt in range(0, len(fin_dict['positives'])):
p = fin_dict['positives'][cnt]
if p is None:
continue
else:
p_fin.append(p)
neg_fin.append(fin_dict['negatives'][cnt])
lines_fin.append(fin_dict['lines'][cnt])
ims_fin.append(fin_dict['images'][cnt])
iids_fin.append(fin_dict['image_ids'][cnt])
pids_fin.append(fin_dict['pair_ids'][cnt])
ds_fin.append(fin_dict['distances'][cnt])
fin_dict['positives'] = p_fin
fin_dict['negatives'] = neg_fin
fin_dict['lines'] = lines_fin
fin_dict['images'] = ims_fin
fin_dict['image_ids'] = iids_fin
fin_dict['pair_ids'] = pids_fin
fin_dict['distances'] = ds_fin
return fin_dict
    # We return
    # a) width-concatenated images in an array of size 2 x C x H x W
    # b) line coords in the following format:
    #    [L1, L2], where each Li is an array of size 5 x Ni, Ni is the number of lines, and each column is [sx, sy, ex, ey, ii],
    #    where (sx, sy) and (ex, ey) are the line endpoints and ii is the image index from which to sample the line
    # c) pos_matches - a list of pairs (i, j), where i is the index of a line in L1 and j is the index
    #    of a line in L2 whose descriptors should be close. If i or j is empty, there is no match in this image
    # d) neg_matches - a list (N1, N2), where each Ni is a list of negative matches: at place k there is
    #    a list of negative matches for positive pair number k in the list Li
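    # Shape sketch (assuming grayscale, non-pyramid input of size H x W): ims is (2, 1, H, W),
    # lines[i] is 5 x Ni with x/y normalized to [-1, 1] for grid sampling, pos_matches holds one
    # ([i], [j]) entry per track, and neg_matches[i][k] lists the line indices in image i that
    # must stay far from positive pair k.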
def get_dual_frame_batch(self, seq_id, call_id, f1, f2, dwn_factor=1.0):
self.call_id = call_id
self.f1 = f1
self.f2 = f2
t0 = cv2.getCPUTickCount()
lines_1, negs_1, pos_1 = self.compose_frame_data(seq_id, call_id, f1)
lines_2, negs_2, pos_2 = self.compose_frame_data(seq_id, call_id, f2)
t1 = cv2.getCPUTickCount()
pos_matches = []
for p1i in range(0, len(pos_1)):
p1 = pos_1[p1i]
if len(pos_2) <= p1i:
p2 = []
else:
p2 = pos_2[p1i]
pos_matches.append([p1, p2])
if not dwn_factor == 1.0:
lines_1 = dwn_factor * lines_1
lines_2 = dwn_factor * lines_2
lines = [lines_1, lines_2]
neg_matches = [negs_1, negs_2]
t2 = cv2.getCPUTickCount()
self.img_ids = []
self.pair_ids = []
self.seq_id = seq_id
im1 = self.get_image_gs(call_id, seq_id, f1)
im2 = self.get_image_gs(call_id, seq_id, f2)
t3 = cv2.getCPUTickCount()
dsize = (0,0)
im1 = cv2.resize(im1, dsize, fx=dwn_factor, fy=dwn_factor, interpolation=cv2.INTER_LINEAR)
im2 = cv2.resize(im2, dsize, fx=dwn_factor, fy=dwn_factor, interpolation=cv2.INTER_LINEAR)
# ims, im_sizes, lus = self.format_images(im1, im2)
ims1, im_size1, lu1 = self.format_image(im1)
ims2, im_size2, lu2 = self.format_image(im2)
ims = np.stack((ims1, ims2), axis=0)
im_sizes = im_size1
lus = [lu1, lu2]
lines = self.format_lines(lines, im_sizes, lus)
lines = [normalize_lines_for_gridsampler(ims[0], lines[0]),
normalize_lines_for_gridsampler(ims[1], lines[1])]
if self.is_report_dist:
return ims, lines, neg_matches, pos_matches, f2-f1, self.img_ids, self.pair_ids
return ims, lines, neg_matches, pos_matches
class DatasetBatch(Dataset):
def __init__(self, kitti_root_dir, root_dir, seq_inds =[], test_mode=False, is_color=False, is_pyramid=False,
is_careful=True, dwn_factor=1.0, is_rep_dist=False, batch_mode=False, full_pass=False, pt_per_line=5,
n_lim=6, is_noisy=False):
self.indexer = DatasetBatchIndexer(kitti_root_dir, root_dir, seq_inds=seq_inds, is_color=is_color, is_pyramid=is_pyramid,
is_careful=is_careful, is_report_dist=is_rep_dist, pt_per_line=pt_per_line, is_add_noise = is_noisy)
self.dwn_factor = dwn_factor
self.bs = 22-1
self.exp_nums = {}
self.exp_num = 0
self.seq_ids = []
self.test_mode = test_mode
self.batch_mode = batch_mode
self.full_pass = full_pass
if batch_mode:
self.n_lim = n_lim
all_inds = np.arange(0, 22)
n = int(len(all_inds)/self.n_lim) + 1
self.part_inds = []
for i in range(0, n):
curr_ind = i * self.n_lim
i_max = curr_ind+self.n_lim
if i_max >= len(all_inds):
i_max = len(all_inds)
self.part_inds.append(all_inds[curr_ind:i_max])
for i in self.indexer.exp_nums:
n = self.indexer.exp_nums[i]
# if test_mode:
self.exp_nums[i] = n * self.bs #* (self.bs - 1) / 2
if self.batch_mode:
self.exp_nums[i] = n
if self.full_pass:
self.exp_nums[i] = n * len(self.part_inds)
# else:
# self.exp_nums[i] = n
self.exp_num += self.exp_nums[i]
self.seq_ids.append(i)
def __len__(self):
if self.test_mode:
return 100
return self.exp_num
def sample_multiframe_randomly(self):
n_lim = self.n_lim
inds = list(np.arange(1, self.bs+1))
all_inds = [0]
        slotsize = len(inds) // (n_lim - 1) + 1
# print(slotsize)
for i in range(0, n_lim - 1):
if len(all_inds) == n_lim:
continue
si = i * slotsize
ei = (i + 1) * slotsize
ei = min(ei, len(inds))
cur_inds = inds[si:ei]
# print(si)
# print(ei)
# print(cur_inds)
new_ind = np.random.choice(cur_inds, 1)
all_inds.append(new_ind[0])
return all_inds
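    # Worked example: with self.bs = 21 and n_lim = 6, inds = [1..21] and
    # slotsize = 21 // 5 + 1 = 5, so the slots are [1..5], [6..10], [11..15], [16..20], [21];
    # one frame is drawn at random from each slot and frame 0 is always kept as the anchor,
    # giving n_lim = 6 frames in total.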
def sample_multiframe_predefined(self, part_id):
return self.part_inds[part_id]
def __getitem__(self, idx):
seq_cnt = 0
seq_id = self.seq_ids[seq_cnt]
agg_sum = 0
while idx >= agg_sum:
agg_sum += self.exp_nums[seq_id]
seq_cnt += 1
if seq_cnt < len(self.seq_ids):
seq_id = self.seq_ids[seq_cnt]
seq_cnt -= 1
seq_id = self.seq_ids[seq_cnt]
agg_sum -= self.exp_nums[seq_id]
idx = idx - agg_sum
if self.batch_mode:
if self.full_pass:
                idx0 = int(idx / len(self.part_inds))
part_id = idx - idx0 * len(self.part_inds)
inds = self.sample_multiframe_predefined(part_id)
mfb = self.indexer.get_multi_frame_batch(seq_id, idx0, inds)
else:
inds = self.sample_multiframe_randomly()
mfb = self.indexer.get_multi_frame_batch(seq_id, idx, inds)
if len(mfb['images']) == 0:
if idx < len(self) - 1:
return self.__getitem__(idx + 1)
else:
return self.__getitem__(0)
else:
return mfb
else:
# if self.test_mode:
batch_data_len = self.bs # * (self.bs - 1) / 2
call_id = int(np.floor(idx / batch_data_len))
pair_id = idx - call_id * batch_data_len
f1 = 0
f2 = pair_id + 1
return self.indexer.get_dual_frame_batch(seq_id, call_id, f1, f2, self.dwn_factor)
# else:
# call_id = idx
# all_inds = list(np.arange(0, self.bs))
# f1, f2 = random.sample(all_inds, 2)
# return self.indexer.get_dual_frame_batch(seq_id, call_id, f1, f2)
def get_combined_training_v2(pt_per_line=5, n_lim=6, is_noisy=False):
kitti_path = '../kittieuroc/'
data_path = '../batched/'
is_pyramid = False
is_careful = True
downsample_factor = 1.0
kwargs = {'num_workers': 4, 'pin_memory': True}
train_dataset = DatasetBatch(kitti_path, data_path, seq_inds = [0,1,2,3,4,5,6,7,8,9,12,13,20,21],
test_mode=False,
is_color=False, is_pyramid=is_pyramid, is_careful=is_careful,
dwn_factor=downsample_factor, batch_mode=True, pt_per_line=pt_per_line, n_lim=n_lim, is_noisy=is_noisy) # 3,0
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=1,
shuffle=True,
collate_fn=compose_batch,
**kwargs)
test_dataset = DatasetBatch(kitti_path, data_path, [10], test_mode=False, is_color=False, is_pyramid=is_pyramid,
is_careful=is_careful,
dwn_factor=downsample_factor, batch_mode=True, pt_per_line=pt_per_line, n_lim=n_lim, full_pass=True, is_noisy=is_noisy) # 3,0
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=1,
shuffle=False,
collate_fn=compose_batch,
**kwargs)
test_dataset_2 = DatasetBatch(kitti_path, data_path, [11], False, False, is_pyramid, is_careful,
dwn_factor=downsample_factor, batch_mode=True, pt_per_line=pt_per_line, n_lim=n_lim,
full_pass=True) # 3,0
test_loader_2 = torch.utils.data.DataLoader(test_dataset_2,
batch_size=1,
shuffle=False,
collate_fn=compose_batch,
**kwargs)
return train_loader, test_loader, test_loader_2
def get_combined_test(seq_id, pt_per_line=5, n_lim=6):
kitti_path = '/media/hpc2_storage/avakhitov/kittieuroc/'
data_path = '/media/hpc2_storage/avakhitov/kittieuroc/batched/'
is_pyramid = False
is_careful = True
downsample_factor = 1.0
kwargs = {'num_workers': 4, 'pin_memory': True}
test_dataset = DatasetBatch(kitti_path, data_path, [seq_id], False, False, is_pyramid, is_careful,
downsample_factor, batch_mode=True, pt_per_line=pt_per_line, n_lim=n_lim, full_pass=True) # 3,0
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=1,
shuffle=False,
collate_fn=compose_batch,
**kwargs)
return test_loader
class InferenceDataset(Dataset):
def __init__(self, datadict, pt_per_line=5):
self.datadict = datadict
self.pt_per_line = pt_per_line
def __len__(self):
return len(self.datadict)
def __getitem__(self, idx):
img_fullpath, dets_fullpath, id, pair_id = self.datadict[idx]
img = cv2.imread(img_fullpath, 0)
img = np.reshape(img, (1, img.shape[0], img.shape[1]))
lines = cv2.imread(dets_fullpath, -1)
lines = line_sampler.prepare_grid_numpy_vec(lines, 1.0, self.pt_per_line)
fin_dict = {}
fin_dict['images'] = img
# print(len(fin_dict['images']))
fin_dict['lines'] = lines
fin_dict['image_ids'] = id
fin_dict['pair_ids'] = pair_id
return fin_dict
| 28,142 | 36.22619 | 177 | py |
xcos | xcos-master/src/main.py | import os
import argparse
# import warnings
import torch
from utils.logging_config import logger
from pipeline import TrainingPipeline, TestingPipeline, EvaluationPipeline
def main(args):
# load config file from checkpoint, this will include the training information (epoch, optimizer parameters)
if args.resume is not None:
logger.info(f"Resuming checkpoint: {args.resume} ...")
resumed_checkpoint = torch.load(args.resume)
else:
resumed_checkpoint = None
args.resumed_checkpoint = resumed_checkpoint
if args.device:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
##################
# Setup pipeline #
##################
if args.mode == 'train':
pipeline = TrainingPipeline(args)
elif args.mode == 'test':
pipeline = TestingPipeline(args)
elif args.mode == 'eval':
pipeline = EvaluationPipeline(args)
else:
raise NotImplementedError(f'Mode {args.mode} not defined.')
################
# Run pipeline #
################
pipeline.run()
def parse_args():
parser = argparse.ArgumentParser(description='PyTorch Template')
parser.add_argument(
'-tc', '--template_config', default=None, type=str,
        help=('Template configuration file. It should contain all default configuration '
'and will be overwritten by specified config.')
)
parser.add_argument(
'-sc', '--specified_configs', default=None, type=str, nargs='+',
        help=('Specified configuration files. They serve as experiment controls and will '
'overwrite template configs.')
)
parser.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
parser.add_argument('-p', '--pretrained', default=None, type=str,
help='path to pretrained checkpoint (default: None)')
parser.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
parser.add_argument('--mode', type=str, choices=['train', 'test', 'eval'], default='train')
parser.add_argument('--saved_keys', default=['data_target', 'model_output'], type=str, nargs='+',
help='Specify the keys to save at testing mode.')
parser.add_argument('--ckpts_subdir', type=str, default='ckpts', help='Subdir name for ckpts saving.')
parser.add_argument('--outputs_subdir', type=str, default='outputs', help='Subdir name for outputs saving.')
args = parser.parse_args()
# Set template config to default if not given
if args.template_config is None:
args.template_config = f'configs/template_{args.mode}_config.json'
return args
if __name__ == '__main__':
# with warnings.catch_warnings():
# warnings.simplefilter('error')
args = parse_args()
main(args)
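# Example invocations (config and checkpoint paths are hypothetical):
# python main.py --mode train -sc configs/my_experiment.json -d 0
# python main.py --mode test -r saved/ckpts/my_experiment/ckpt-best.pth --saved_keys data_target model_output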
| 2,920 | 35.5125 | 112 | py |
xcos | xcos-master/src/GradCam.py | from PIL import Image
import cv2
import numpy as np
import torch.nn.functional as F
from model.xcos_modules import l2normalize
class GradientExtractor:
""" Extracting activations and
    registering gradients from targeted intermediate layers
"""
def __init__(self, model):
self.model = model
def __call__(self, img1, img2):
self.gradients = []
feat1, out1 = self.forward(img1)
feat2, out2 = self.forward(img2)
return feat1, feat2, out1, out2
def save_gradient(self, grad):
self.gradients.append(grad)
def forward(self, x):
feats = []
x = self.model.input_layer(x)
x = self.model.body(x)
x.register_hook(self.save_gradient)
feats.append(x)
x = self.model.output_layer(x)
return feats, x
class ModelOutputs:
""" Making a forward pass, and getting:
1. The network output.
    2. Activations from intermediate targeted layers.
    3. Gradients from intermediate targeted layers.
"""
def __init__(self, model):
self.model = model
self.extractor = GradientExtractor(self.model)
def get_grads(self):
return self.extractor.gradients
def __call__(self, img1, img2):
feat1, feat2, out1, out2 = self.extractor(img1, img2)
out1 = l2normalize(out1)
out2 = l2normalize(out2)
cos = F.cosine_similarity(out1, out2, dim=1, eps=1e-6)
return feat1, feat2, cos
class FaceGradCam:
def __init__(self, model):
self.model = model
self.extractor = ModelOutputs(self.model)
def __call__(self, img1, img2):
feat1, feat2, output = self.extractor(img1, img2)
self.model.zero_grad()
output.backward(retain_graph=True)
grads = self.extractor.get_grads()
hm1 = self.make_heatmap(grads[0].cpu().data.numpy(), feat1[0].cpu().data.numpy())
hm2 = self.make_heatmap(grads[1].cpu().data.numpy(), feat2[0].cpu().data.numpy())
return hm1, hm2
def make_heatmap(self, grad, feat):
"""Batch operation supported
"""
weights = np.mean(grad, axis=(-2, -1), keepdims=True)
x = weights * feat
x = x.sum(axis=1)
x = np.maximum(0, x)
x = x - np.min(x, axis=(-2, -1), keepdims=True)
x = x / np.max(x, axis=(-2, -1), keepdims=True)
x = 1. - x
return x
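    # Math sketch: make_heatmap follows the Grad-CAM recipe,
    # heatmap = 1 - norm(relu(sum_k w_k * A_k)) with w_k the spatial mean of d(cos)/dA_k,
    # where A_k are the backbone feature maps. The final "1 - x" inversion flips the map so
    # that (presumably) low values mark the regions contributing most to the cosine similarity.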
def make_img(self, heatmap, size, ori_img=None):
"""Batch operation NOT suppored
"""
hm = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
hm = cv2.resize(hm, size)
if ori_img is not None:
hm = np.float32(hm) / 255 + np.transpose(ori_img.numpy(), (1, 2, 0)) * 0.5 + 0.5
hm /= np.max(hm)
hm = np.uint8(255 * hm)
return Image.fromarray(hm)
| 2,847 | 27.48 | 92 | py |
xcos | xcos-master/src/pipeline/base_pipeline.py | import os
import json
import datetime
import logging
from abc import ABC, abstractmethod
import torch
import pandas as pd
from utils.util import get_instance
from utils.visualization import WriterTensorboard
from utils.logging_config import logger
from utils.global_config import global_config
import data_loader.data_loaders as module_data
import model.metric as module_metric
import model.model as module_arch
class BasePipeline(ABC):
"""
Base pipeline for training/validation/testing process
"""
def __init__(
self, args
):
global_config.setup(args.template_config, args.specified_configs, args.resumed_checkpoint)
self.start_time = datetime.datetime.now().strftime('%m%d_%H%M%S')
self.saving_dir = self._create_saving_dir(args)
self._add_logging_file_handler()
self._save_config_file()
self._print_config_messages()
self.device, self.device_ids = self._setup_device()
self.data_loader = self._setup_data_loader()
self.valid_data_loaders = self._setup_valid_data_loaders()
self.test_data_loaders = self._setup_test_data_loaders()
self.optimize_strategy = global_config.get('optimize_strategy', 'normal')
self.validation_strategy = global_config.get('validation_strategy', self.optimize_strategy)
self._setup_model()
self._setup_data_parallel()
self._setup_writer()
self.evaluation_metrics = self._setup_evaluation_metrics()
self._setup_pipeline_specific_attributes()
self._setup_config()
if args.resumed_checkpoint is not None:
self._resume_checkpoint(args.resumed_checkpoint)
if args.pretrained is not None:
self._load_pretrained(args.pretrained)
self.worker_outputs = {}
self.workers = self._create_workers()
@abstractmethod
def _setup_config(self):
pass
def _setup_pipeline_specific_attributes(self):
pass
@abstractmethod
def _create_workers(self):
return []
# =============== functions for setting up attributes (start) ================
@abstractmethod
def _create_saving_dir(self, resume_path):
""" Create directory to save ckpt, config, and logges messags. Return the created path """
pass
def _save_config_file(self):
# Save configuration file into checkpoint directory
config_save_path = os.path.join(self.saving_dir, 'config.json')
with open(config_save_path, 'w') as handle:
json.dump(global_config, handle, indent=4, sort_keys=False)
def _add_logging_file_handler(self):
fileHandler = logging.FileHandler(os.path.join(self.saving_dir, 'log.txt'))
logger.addHandler(fileHandler)
def _print_config_messages(self):
global_config.print_changed()
logger.info(f'Experiment name: {global_config["name"]}')
def _setup_device(self):
def prepare_device(n_gpu_use):
"""
setup GPU device if available, move model into configured device
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
logger.warning(
"Warning: There\'s no GPU available on this machine, training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
msg = (f"Warning: The number of GPU\'s configured to use is {n_gpu_use} "
f"but only {n_gpu} are available on this machine.")
logger.warning(msg)
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
device, device_ids = prepare_device(global_config['n_gpu'])
return device, device_ids
def _setup_model(self):
""" Setup model and print summary """
model = get_instance(
module_arch, 'arch', global_config,
)
# Print out the model architecture and number of parameters
model.summary()
self.model = model.to(self.device)
def _setup_data_parallel(self):
if len(self.device_ids) > 1:
self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
def _get_non_parallel_model(self):
model = self.model.module if isinstance(self.model, torch.nn.DataParallel) else self.model
return model
def _setup_data_loader(self, key='data_loader'):
return get_instance(module_data, key, global_config)
def _setup_data_loaders(self, key):
data_loaders = [
getattr(module_data, entry['type'])(**entry['args'])
for entry in global_config[key].values()
]
return data_loaders
def _setup_valid_data_loaders(self):
if 'valid_data_loaders' in global_config.keys():
valid_data_loaders = self._setup_data_loaders('valid_data_loaders')
if self.data_loader.validation_split > 0:
            raise ValueError('Split ratio should not be > 0 when other validation loaders are specified.')
elif self.data_loader.validation_split > 0:
valid_data_loaders = [self.data_loader.split_validation()]
else:
valid_data_loaders = []
return valid_data_loaders
def _setup_test_data_loaders(self):
return None
def _setup_evaluation_metrics(self):
evaluation_metrics = [
getattr(module_metric, entry['type'])(**entry['args']).to(self.device)
for entry in global_config['metrics'].values()
]
return evaluation_metrics
def _setup_optimizers(self):
""" Setup optimizers according to configuration.
Each optimizer has its corresponding network(s) to train, specified by 'target_network' in configuraion.
If no `target_network` is specified, all parameters of self.model will be included.
"""
self.optimizers = {}
for name, entry in global_config['optimizers'].items():
model = self._get_non_parallel_model()
if 'target_network' in entry.keys():
network = getattr(model, entry['target_network'])
else:
network = model
logger.warning(f'Target network of optimizer "{name}" not specified. '
f'All params of self.model will be included.')
trainable_params = filter(lambda p: p.requires_grad, network.parameters())
self.optimizers[name] = getattr(torch.optim, entry['type'])(trainable_params, **entry['args'])
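    # Config sketch (the optimizer name "main" and network "backbone" are hypothetical):
    # "optimizers": {
    #     "main": {"type": "Adam", "target_network": "backbone", "args": {"lr": 1e-4}}
    # }
    # builds torch.optim.Adam over the trainable parameters of self.model.backbone.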
def _setup_writer(self):
# setup visualization writer instance
writer_dir = os.path.join(global_config['visualization']['log_dir'], global_config['name'], self.start_time)
self.writer = WriterTensorboard(writer_dir, logger, global_config['visualization']['tensorboardX'])
self.start_epoch = 1
self.train_iteration_count = 0
self.valid_iteration_counts = [0] * len(self.valid_data_loaders)
# =============== functions for setting up attributes (start) ================
def _load_pretrained(self, pretrained_path):
""" Load pretrained model not strictly """
logger.info(f"Loading pretrained checkpoint: {pretrained_path} ...")
checkpoint = torch.load(pretrained_path)
model = self._get_non_parallel_model()
model.load_state_dict(checkpoint['state_dict'], strict=False)
def _resume_checkpoint(self, resumed_checkpoint):
"""
Resume from saved resumed_checkpoints
:param resume_path: resumed_checkpoint path to be resumed
"""
self._resume_model_params(resumed_checkpoint)
from .training_pipeline import TrainingPipeline
if isinstance(self, TrainingPipeline):
self._resume_training_state(resumed_checkpoint)
logger.info(f"resumed_checkpoint (trained epoch {self.start_epoch - 1}) loaded")
def _resume_training_state(self, resumed_checkpoint):
""" States only for training pipeline like iteration counts, optimizers,
and lr_schedulers are resumed in this function """
self.start_epoch = resumed_checkpoint['epoch'] + 1
self.monitor_best = resumed_checkpoint['monitor_best']
# Estimated iteration_count is based on length of the current data loader,
# which will be wrong if the batch sizes between the two training processes are different.
self.train_iteration_count = resumed_checkpoint.get('train_iteration_count', 0)
self.valid_iteration_counts = resumed_checkpoint.get(
'valid_iteration_counts', [0] * len(self.valid_data_loaders))
self.valid_iteration_counts = list(self.valid_iteration_counts)
# load optimizer state from resumed_checkpoint only when optimizer type is not changed.
optimizers_ckpt = resumed_checkpoint['optimizers']
for key in global_config['optimizers'].keys():
if key not in optimizers_ckpt.keys():
logger.warning(f'Optimizer name {key} in config file is not in checkpoint (not resumed)')
elif resumed_checkpoint['config']['optimizers'][key]['type'] != global_config['optimizers'][key]['type']:
logger.warning(f'Optimizer type in config file is different from that of checkpoint (not resumed)')
else:
self.optimizers[key].load_state_dict(optimizers_ckpt[key])
def _resume_model_params(self, resumed_checkpoint):
""" Load model parameters from resumed checkpoint """
# load architecture params from resumed_checkpoint.
if resumed_checkpoint['config']['arch'] != global_config['arch']:
logger.warning(
'Warning: Architecture config given in config file is different from that of resumed_checkpoint. '
'This may yield an exception while state_dict is being loaded.'
)
model = self._get_non_parallel_model()
model.load_state_dict(resumed_checkpoint['state_dict'])
def _print_and_write_log(self, epoch, worker_outputs, write=True):
# This function is to print out epoch summary of workers
# and append these summary values on the summary csv file.
if write:
self.writer.set_step(epoch, 'epoch_average') # TODO: See if we can use tree-structured tensorboard logging
logger.info(f' epoch: {epoch:d}')
epoch_record = {'epoch': epoch}
# print the logged info for each loader (corresponding to each worker)
for loader_name, output in worker_outputs.items():
log = output['log']
if global_config.verbosity >= 1:
logger.info(f' {loader_name}:')
for key, value in log.items():
if global_config.verbosity >= 1:
logger.info(f' {str(key):20s}: {value:.4f}')
if 'elapsed_time' not in key and write:
value = value.item() if isinstance(value, torch.Tensor) else value
epoch_record[f'{loader_name}_{key}'] = [value]
# TODO: See if we can use tree-structured tensorboard logging
self.writer.add_scalar(f'{loader_name}_{key}', value)
# concatenate summary of this epoch into 'epochs_summary.csv'
new_df = pd.DataFrame(epoch_record)
csv_file = os.path.join(self.saving_dir, 'epochs_summary.csv')
df = pd.concat([pd.read_csv(csv_file), new_df]) if os.path.exists(csv_file) else new_df
df.to_csv(csv_file, index=False)
| 11,688 | 43.109434 | 119 | py |
xcos | xcos-master/src/pipeline/testing_pipeline.py | import os
import numpy as np
from .base_pipeline import BasePipeline
from worker.tester import Tester
from utils.global_config import global_config
from utils.util import ensure_dir
from utils.logging_config import logger
class TestingPipeline(BasePipeline):
def __init__(self, args):
"""
# You may need this line to solve the error described in https://github.com/pytorch/pytorch/issues/973
torch.multiprocessing.set_sharing_strategy('file_system')
"""
super().__init__(args)
def _create_saving_dir(self, args):
saving_dir = os.path.join(global_config['trainer']['save_dir'], args.outputs_subdir,
global_config['name'])
if os.path.exists(saving_dir):
logger.warning(f'The saving directory "{saving_dir}" already exists. '
                           f'If continued, some files might be overwritten.')
response = input('Proceed? [y/N] ')
if response != 'y':
logger.info('Exit.')
exit()
ensure_dir(saving_dir)
if args.resume is not None:
link = os.path.join(saving_dir, 'resumed_ckpt.pth')
if os.path.exists(link):
os.remove(link)
# Mark the used resume path by a symbolic link
os.symlink(os.path.abspath(args.resume), link)
return saving_dir
def _setup_data_loader(self):
return None
def _setup_valid_data_loaders(self):
return []
def _setup_config(self):
pass
def _create_workers(self):
workers = []
# Add a tester for each data loader
for test_data_loader in self.test_data_loaders:
tester = Tester(pipeline=self, test_data_loader=test_data_loader)
workers += [tester]
return workers
def _save_inference_results(self, name: str, worker_output: dict):
path = os.path.join(self.saving_dir, f'{name}_output.npz')
logger.info(f'Saving {path} ...')
np.savez(path, **worker_output)
def _setup_test_data_loaders(self):
if 'test_data_loaders' in global_config.keys():
test_data_loaders = self._setup_data_loaders('test_data_loaders')
return test_data_loaders
else:
raise ValueError(f"No test_data_loaders key in config")
def run(self):
"""
Full testing pipeline logic
"""
for worker in self.workers:
worker_output = worker.run(0)
if not global_config.save_while_infer:
self._save_inference_results(worker.data_loader.name, worker_output['saved'])
self.worker_outputs[worker.data_loader.name] = worker_output
self._print_and_write_log(0, self.worker_outputs, write=True)
| 2,805 | 34.518987 | 110 | py |
xcos | xcos-master/src/pipeline/training_pipeline.py | import math
import os
import torch
from .base_pipeline import BasePipeline
from worker.trainer import Trainer
from worker.validator import Validator
import model.loss as module_loss
from utils.global_config import global_config
from utils.logging_config import logger
from utils.util import ensure_dir
class TrainingPipeline(BasePipeline):
def __init__(self, args):
super().__init__(args)
def _setup_pipeline_specific_attributes(self):
self._setup_loss_functions()
if self.optimize_strategy == 'GAN':
self._setup_gan_loss_functions()
self._setup_optimizers()
self._setup_lr_schedulers()
def _create_saving_dir(self, args):
saving_dir = os.path.join(global_config['trainer']['save_dir'], args.ckpts_subdir,
global_config['name'], self.start_time)
ensure_dir(saving_dir)
# create a link to the resumed checkpoint as a reference
if args.resume is not None:
link = os.path.join(saving_dir, 'resumed_ckpt.pth')
os.symlink(os.path.abspath(args.resume), link)
return saving_dir
def _setup_loss_functions(self):
self.loss_functions = [
getattr(module_loss, entry['type'])(**entry['args']).to(self.device)
for key, entry in global_config['losses'].items()
]
def _setup_gan_loss_functions(self):
""" Setup GAN loss functions. Will only be called when self.optimize_strategy == 'GAN'
The keys of gan_losses in config should have strict one-to-one mapping with names of optimizers. """
self.gan_loss_functions = {
key: getattr(module_loss, entry['type'])(**entry['args']).to(self.device)
for key, entry in global_config['gan_losses'].items()
}
def _setup_lr_schedulers(self):
""" Setup learning rate schedulers according to configuration. Note that the naming of
optimizers and lr_schedulers in configuration should have a strict one-to-one mapping.
"""
self.lr_schedulers = {}
for optimizer_name, optimizer in self.optimizers.items():
entry = global_config['lr_schedulers'][optimizer_name]
self.lr_schedulers[optimizer_name] = getattr(torch.optim.lr_scheduler, entry['type'])(
optimizer, **entry['args'])
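        # Illustrative one-to-one naming (scheduler type/args are assumptions):
        #   "optimizers":    {"main": {...}}
        #   "lr_schedulers": {"main": {"type": "StepLR",
        #                              "args": {"step_size": 30, "gamma": 0.1}}}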
def _create_workers(self):
trainer = Trainer(
self, self.data_loader, self.train_iteration_count
)
workers = [trainer]
for i, valid_data_loader in enumerate(self.valid_data_loaders):
workers.append(
Validator(
self, valid_data_loader, self.valid_iteration_counts[i]
)
)
return workers
def _setup_config(self):
self.epochs = global_config['trainer']['epochs']
self.save_freq = global_config['trainer']['save_freq']
# configuration to monitor model performance and save best
self.monitored_loader = global_config['trainer']['monitored_loader']
valid_loader_names = [loader.name for loader in self.valid_data_loaders]
assert self.monitored_loader in valid_loader_names, \
f"Config monitored loader '{self.monitored_loader}' is not in validation data loaders {valid_loader_names}"
self.monitored_metric = global_config['trainer']['monitored_metric']
valid_metric_names = [f"avg_{metric.nickname}" for metric in self.evaluation_metrics] + ["avg_loss"]
assert self.monitored_metric in valid_metric_names, \
f"Config monitored metric '{self.monitored_metric}' is not in valid evaluation metrics {valid_metric_names}"
self.monitor_mode = global_config['trainer']['monitor_mode']
assert self.monitor_mode in ['min', 'max', 'off']
self.monitor_best = math.inf if self.monitor_mode == 'min' else -math.inf
self.do_validation = len(self.valid_data_loaders) > 0
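        # Illustrative 'trainer' config fragment (the values are assumptions):
        #   "trainer": {"epochs": 100, "save_freq": 5, "monitored_loader": "valid",
        #               "monitored_metric": "avg_loss", "monitor_mode": "min"}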
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param save_best: if True, add '-best.pth' at the end of the best model
"""
arch = type(self.model).__name__
        # ensure that we save the model state without the DataParallel wrapper
if isinstance(self.model, torch.nn.DataParallel):
# get the original state out from DataParallel module
model_state = self.model.module.state_dict()
else:
model_state = self.model.state_dict()
state = {
'arch': arch,
'epoch': epoch,
'state_dict': model_state,
'optimizers': {key: optimizer.state_dict() for key, optimizer in self.optimizers.items()},
'monitor_best': self.monitor_best,
'config': global_config,
'train_iteration_count': self.train_iteration_count,
'valid_iteration_counts': self.valid_iteration_counts,
}
best_str = '-best' if save_best else ''
monitored_name = f'{self.monitored_loader}_{self.monitored_metric}'
filename = os.path.join(
self.saving_dir, f'ckpt-ep{epoch:04d}-{monitored_name}{self.monitor_best:.4f}{best_str}.pth'
)
torch.save(state, filename)
logger.info(f"Saving checkpoint: {filename} ...")
def _check_and_save_best(self, epoch, worker_outputs):
"""
Evaluate model performance according to configured metric, save best checkpoint as model_best
"""
best = False
if self.monitor_mode != 'off':
try:
metric_value = worker_outputs[self.monitored_loader]['log'][self.monitored_metric]
if (self.monitor_mode == 'min' and metric_value < self.monitor_best) or\
(self.monitor_mode == 'max' and metric_value > self.monitor_best):
self.monitor_best = metric_value
best = True
except KeyError:
if epoch == 1:
msg = f"Warning: Can\'t recognize metric '{self.monitored_metric}' in '{self.monitored_loader}' "\
+ f"for performance monitoring. model_best checkpoint won\'t be updated."
logger.warning(msg)
if epoch % self.save_freq == 0 or best:
self._save_checkpoint(epoch, save_best=best)
def _after_epoch(self, epoch, worker_outputs):
self._print_and_write_log(epoch, worker_outputs)
self._check_and_save_best(epoch, worker_outputs)
if self.lr_schedulers is not None:
for scheduler in self.lr_schedulers.values():
scheduler.step()
def run(self):
"""
Full training pipeline logic
"""
for epoch in range(self.start_epoch, self.epochs + 1):
for worker in self.workers:
worker_output = worker.run(epoch)
self.worker_outputs[worker.data_loader.name] = worker_output
self._after_epoch(epoch, self.worker_outputs)
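# Hypothetical entry-point sketch (argument parsing is assumed to happen in a separate script):
#   pipeline = TrainingPipeline(args)
#   pipeline.run()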
| 7,092 | 41.728916 | 120 | py |